Upgrade rust-bitcoin to 0.31
[rust-lightning] / lightning / src / ln / chanmon_update_fail_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
11 //! monitor updates.
12 //! There are a bunch of these as their handling is relatively error-prone so they are split out
13 //! here. See also the chanmon_fail_consistency fuzz test.
14
15 use bitcoin::blockdata::constants::genesis_block;
16 use bitcoin::hash_types::BlockHash;
17 use bitcoin::network::Network;
18 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
19 use crate::chain::transaction::OutPoint;
20 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
21 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
22 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
23 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
24 use crate::ln::msgs;
25 use crate::ln::types::ChannelId;
26 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
27 use crate::util::test_channel_signer::TestChannelSigner;
28 use crate::util::errors::APIError;
29 use crate::util::ser::{ReadableArgs, Writeable};
30 use crate::util::test_utils::TestBroadcaster;
31
32 use crate::ln::functional_test_utils::*;
33
34 use crate::util::test_utils;
35
36 use crate::io;
37 use bitcoin::hashes::Hash;
38 use crate::prelude::*;
39 use crate::sync::{Arc, Mutex};
40
41 #[test]
42 fn test_monitor_and_persister_update_fail() {
43         // Test that if both updating the `ChannelMonitor` and persisting the updated
44         // `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
45         // one that gets returned.
46         let chanmon_cfgs = create_chanmon_cfgs(2);
47         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
48         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
49         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
50
51         // Create some initial channel
52         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
53         let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };
54
55         // Rebalance the network to generate htlc in the two directions
56         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
57
58         // Route an HTLC from node 0 to node 1 (but don't settle)
59         let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
60
61         // Make a copy of the ChainMonitor so we can capture the error it returns on a
62         // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
63         // directly, the node would fail to be `Drop`'d at the end because its
64         // ChannelManager and ChainMonitor would be out of sync.
65         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
66         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
67         let persister = test_utils::TestPersister::new();
68         let tx_broadcaster = TestBroadcaster {
69                 txn_broadcasted: Mutex::new(Vec::new()),
70                 // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
71                 // that we are at height 200 so that it doesn't think we're violating the time lock
72                 // requirements of transactions broadcasted at that point.
73                 blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
74         };
75         let chain_mon = {
76                 let new_monitor = {
77                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
78                         let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
79                                 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
80                         assert!(new_monitor == *monitor);
81                         new_monitor
82                 };
83                 let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
84                 assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
85                 chain_mon
86         };
87         chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
88
89         // Try to update ChannelMonitor
90         nodes[1].node.claim_funds(preimage);
91         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
92         check_added_monitors!(nodes[1], 1);
93
94         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
95         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
96         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
97
98         {
99                 let mut node_0_per_peer_lock;
100                 let mut node_0_peer_state_lock;
101                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
102                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
103                                 // Check that the persister returns InProgress (and will never actually complete)
104                                 // as the monitor update errors.
105                                 if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor paused"); }
106                                 logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1);
107
108                                 // Apply the monitor update to the original ChainMonitor, ensuring the
109                                 // ChannelManager and ChannelMonitor aren't out of sync.
110                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update),
111                                         ChannelMonitorUpdateStatus::Completed);
112                         } else { assert!(false); }
113                 } else {
114                         assert!(false);
115                 }
116         }
117
118         check_added_monitors!(nodes[0], 1);
119         expect_payment_sent(&nodes[0], preimage, None, false, false);
120 }
121
122 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
123         // Test that we can recover from a simple temporary monitor update failure optionally with
124         // a disconnect in between
125         let chanmon_cfgs = create_chanmon_cfgs(2);
126         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
127         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
128         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
129         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
130
131         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
132
133         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
134
135         {
136                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
137                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
138                         ), false, APIError::MonitorUpdateInProgress, {});
139                 check_added_monitors!(nodes[0], 1);
140         }
141
142         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
143         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
144         assert_eq!(nodes[0].node.list_channels().len(), 1);
145
146         if disconnect {
147                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
148                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
149                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
150                 reconnect_args.send_channel_ready = (true, true);
151                 reconnect_nodes(reconnect_args);
152         }
153
154         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
155         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
156         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
157         check_added_monitors!(nodes[0], 0);
158
159         let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
160         assert_eq!(events_2.len(), 1);
161         let payment_event = SendEvent::from_event(events_2.pop().unwrap());
162         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
163         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
164         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
165
166         expect_pending_htlcs_forwardable!(nodes[1]);
167
168         let events_3 = nodes[1].node.get_and_clear_pending_events();
169         assert_eq!(events_3.len(), 1);
170         match events_3[0] {
171                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
172                         assert_eq!(payment_hash_1, *payment_hash);
173                         assert_eq!(amount_msat, 1_000_000);
174                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
175                         assert_eq!(via_channel_id, Some(channel_id));
176                         match &purpose {
177                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
178                                         assert!(payment_preimage.is_none());
179                                         assert_eq!(payment_secret_1, *payment_secret);
180                                 },
181                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
182                         }
183                 },
184                 _ => panic!("Unexpected event"),
185         }
186
187         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
188
189         // Now set it to failed again...
190         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
191         {
192                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
193                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
194                                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
195                         ), false, APIError::MonitorUpdateInProgress, {});
196                 check_added_monitors!(nodes[0], 1);
197         }
198
199         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
200         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
201         assert_eq!(nodes[0].node.list_channels().len(), 1);
202
203         if disconnect {
204                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
205                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
206                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
207         }
208
209         // ...and make sure we can force-close a frozen channel
210         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
211         check_added_monitors!(nodes[0], 1);
212         check_closed_broadcast!(nodes[0], true);
213
214         // TODO: Once we hit the chain with the failure transaction we should check that we get a
215         // PaymentPathFailed event
216
217         assert_eq!(nodes[0].node.list_channels().len(), 0);
218         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
219 }
220
221 #[test]
222 fn test_simple_monitor_temporary_update_fail() {
223         do_test_simple_monitor_temporary_update_fail(false);
224         do_test_simple_monitor_temporary_update_fail(true);
225 }
226
227 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
228         let disconnect_flags = 8 | 16;
229
230         // Test that we can recover from a temporary monitor update failure with some in-flight
231         // HTLCs going on at the same time potentially with some disconnection thrown in.
232         // * First we route a payment, then get a temporary monitor update failure when trying to
233         //   route a second payment. We then claim the first payment.
234         // * If disconnect_count is set, we will disconnect at this point (which is likely as
235         //   InProgress likely indicates net disconnect which resulted in failing to update the
236         //   ChannelMonitor on a watchtower).
237         // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
238         //   immediately, otherwise we wait disconnect and deliver them via the reconnect
239         //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
240         //   disconnect_count & !disconnect_flags is 0).
241         // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
242         //   through message sending, potentially disconnect/reconnecting multiple times based on
243         //   disconnect_count, to get the update_fulfill_htlc through.
244         // * We then walk through more message exchanges to get the original update_add_htlc
245         //   through, swapping message ordering based on disconnect_count & 8 and optionally
246         //   disconnect/reconnecting based on disconnect_count.
247         let chanmon_cfgs = create_chanmon_cfgs(2);
248         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
249         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
250         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
251         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
252
253         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
254
255         // Now try to send a second payment which will fail to send
256         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
257         {
258                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
259                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
260                                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
261                         ), false, APIError::MonitorUpdateInProgress, {});
262                 check_added_monitors!(nodes[0], 1);
263         }
264
265         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
266         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
267         assert_eq!(nodes[0].node.list_channels().len(), 1);
268
269         // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
270         // but nodes[0] won't respond since it is frozen.
271         nodes[1].node.claim_funds(payment_preimage_1);
272         check_added_monitors!(nodes[1], 1);
273         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
274
275         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
276         assert_eq!(events_2.len(), 1);
277         let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
278                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
279                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
280                         assert!(update_add_htlcs.is_empty());
281                         assert_eq!(update_fulfill_htlcs.len(), 1);
282                         assert!(update_fail_htlcs.is_empty());
283                         assert!(update_fail_malformed_htlcs.is_empty());
284                         assert!(update_fee.is_none());
285
286                         if (disconnect_count & 16) == 0 {
287                                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
288                                 let events_3 = nodes[0].node.get_and_clear_pending_events();
289                                 assert_eq!(events_3.len(), 1);
290                                 match events_3[0] {
291                                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
292                                                 assert_eq!(*payment_preimage, payment_preimage_1);
293                                                 assert_eq!(*payment_hash, payment_hash_1);
294                                         },
295                                         _ => panic!("Unexpected event"),
296                                 }
297
298                                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
299                                 check_added_monitors!(nodes[0], 1);
300                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
301                         }
302
303                         (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
304                 },
305                 _ => panic!("Unexpected event"),
306         };
307
308         if disconnect_count & !disconnect_flags > 0 {
309                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
310                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
311         }
312
313         // Now fix monitor updating...
314         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
315         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
316         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
317         check_added_monitors!(nodes[0], 0);
318
319         macro_rules! disconnect_reconnect_peers { () => { {
320                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
321                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
322
323                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
324                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
325                 }, true).unwrap();
326                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
327                 assert_eq!(reestablish_1.len(), 1);
328                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
329                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
330                 }, false).unwrap();
331                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
332                 assert_eq!(reestablish_2.len(), 1);
333
334                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
335                 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
336                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
337                 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
338
339                 assert!(as_resp.0.is_none());
340                 assert!(bs_resp.0.is_none());
341
342                 (reestablish_1, reestablish_2, as_resp, bs_resp)
343         } } }
344
345         let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
346                 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
347                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
348
349                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
350                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
351                 }, true).unwrap();
352                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
353                 assert_eq!(reestablish_1.len(), 1);
354                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
355                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
356                 }, false).unwrap();
357                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
358                 assert_eq!(reestablish_2.len(), 1);
359
360                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
361                 check_added_monitors!(nodes[0], 0);
362                 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
363                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
364                 check_added_monitors!(nodes[1], 0);
365                 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
366
367                 assert!(as_resp.0.is_none());
368                 assert!(bs_resp.0.is_none());
369
370                 assert!(bs_resp.1.is_none());
371                 if (disconnect_count & 16) == 0 {
372                         assert!(bs_resp.2.is_none());
373
374                         assert!(as_resp.1.is_some());
375                         assert!(as_resp.2.is_some());
376                         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
377                 } else {
378                         assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
379                         assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
380                         assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
381                         assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
382                         assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
383                         assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
384
385                         assert!(as_resp.1.is_none());
386
387                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
388                         let events_3 = nodes[0].node.get_and_clear_pending_events();
389                         assert_eq!(events_3.len(), 1);
390                         match events_3[0] {
391                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
392                                         assert_eq!(*payment_preimage, payment_preimage_1);
393                                         assert_eq!(*payment_hash, payment_hash_1);
394                                 },
395                                 _ => panic!("Unexpected event"),
396                         }
397
398                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
399                         let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
400                         // No commitment_signed so get_event_msg's assert(len == 1) passes
401                         check_added_monitors!(nodes[0], 1);
402
403                         as_resp.1 = Some(as_resp_raa);
404                         bs_resp.2 = None;
405                 }
406
407                 if disconnect_count & !disconnect_flags > 1 {
408                         let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
409
410                         if (disconnect_count & 16) == 0 {
411                                 assert!(reestablish_1 == second_reestablish_1);
412                                 assert!(reestablish_2 == second_reestablish_2);
413                         }
414                         assert!(as_resp == second_as_resp);
415                         assert!(bs_resp == second_bs_resp);
416                 }
417
418                 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
419         } else {
420                 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
421                 assert_eq!(events_4.len(), 2);
422                 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
423                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
424                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
425                                 msg.clone()
426                         },
427                         _ => panic!("Unexpected event"),
428                 })
429         };
430
431         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
432
433         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
434         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
435         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
436         // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
437         check_added_monitors!(nodes[1], 1);
438
439         if disconnect_count & !disconnect_flags > 2 {
440                 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
441
442                 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
443                 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
444
445                 assert!(as_resp.2.is_none());
446                 assert!(bs_resp.2.is_none());
447         }
448
449         let as_commitment_update;
450         let bs_second_commitment_update;
451
452         macro_rules! handle_bs_raa { () => {
453                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
454                 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
455                 assert!(as_commitment_update.update_add_htlcs.is_empty());
456                 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
457                 assert!(as_commitment_update.update_fail_htlcs.is_empty());
458                 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
459                 assert!(as_commitment_update.update_fee.is_none());
460                 check_added_monitors!(nodes[0], 1);
461         } }
462
463         macro_rules! handle_initial_raa { () => {
464                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
465                 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
466                 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
467                 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
468                 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
469                 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
470                 assert!(bs_second_commitment_update.update_fee.is_none());
471                 check_added_monitors!(nodes[1], 1);
472         } }
473
474         if (disconnect_count & 8) == 0 {
475                 handle_bs_raa!();
476
477                 if disconnect_count & !disconnect_flags > 3 {
478                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
479
480                         assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
481                         assert!(bs_resp.1.is_none());
482
483                         assert!(as_resp.2.unwrap() == as_commitment_update);
484                         assert!(bs_resp.2.is_none());
485
486                         assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
487                 }
488
489                 handle_initial_raa!();
490
491                 if disconnect_count & !disconnect_flags > 4 {
492                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
493
494                         assert!(as_resp.1.is_none());
495                         assert!(bs_resp.1.is_none());
496
497                         assert!(as_resp.2.unwrap() == as_commitment_update);
498                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
499                 }
500         } else {
501                 handle_initial_raa!();
502
503                 if disconnect_count & !disconnect_flags > 3 {
504                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
505
506                         assert!(as_resp.1.is_none());
507                         assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
508
509                         assert!(as_resp.2.is_none());
510                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
511
512                         assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
513                 }
514
515                 handle_bs_raa!();
516
517                 if disconnect_count & !disconnect_flags > 4 {
518                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
519
520                         assert!(as_resp.1.is_none());
521                         assert!(bs_resp.1.is_none());
522
523                         assert!(as_resp.2.unwrap() == as_commitment_update);
524                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
525                 }
526         }
527
528         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
529         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
530         // No commitment_signed so get_event_msg's assert(len == 1) passes
531         check_added_monitors!(nodes[0], 1);
532
533         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
534         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
535         // No commitment_signed so get_event_msg's assert(len == 1) passes
536         check_added_monitors!(nodes[1], 1);
537
538         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
539         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
540         check_added_monitors!(nodes[1], 1);
541
542         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
543         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
544         check_added_monitors!(nodes[0], 1);
545         expect_payment_path_successful!(nodes[0]);
546
547         expect_pending_htlcs_forwardable!(nodes[1]);
548
549         let events_5 = nodes[1].node.get_and_clear_pending_events();
550         assert_eq!(events_5.len(), 1);
551         match events_5[0] {
552                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
553                         assert_eq!(payment_hash_2, *payment_hash);
554                         assert_eq!(amount_msat, 1_000_000);
555                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
556                         assert_eq!(via_channel_id, Some(channel_id));
557                         match &purpose {
558                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
559                                         assert!(payment_preimage.is_none());
560                                         assert_eq!(payment_secret_2, *payment_secret);
561                                 },
562                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
563                         }
564                 },
565                 _ => panic!("Unexpected event"),
566         }
567
568         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
569 }
570
571 #[test]
572 fn test_monitor_temporary_update_fail_a() {
573         do_test_monitor_temporary_update_fail(0);
574         do_test_monitor_temporary_update_fail(1);
575         do_test_monitor_temporary_update_fail(2);
576         do_test_monitor_temporary_update_fail(3);
577         do_test_monitor_temporary_update_fail(4);
578         do_test_monitor_temporary_update_fail(5);
579 }
580
581 #[test]
582 fn test_monitor_temporary_update_fail_b() {
583         do_test_monitor_temporary_update_fail(2 | 8);
584         do_test_monitor_temporary_update_fail(3 | 8);
585         do_test_monitor_temporary_update_fail(4 | 8);
586         do_test_monitor_temporary_update_fail(5 | 8);
587 }
588
589 #[test]
590 fn test_monitor_temporary_update_fail_c() {
591         do_test_monitor_temporary_update_fail(1 | 16);
592         do_test_monitor_temporary_update_fail(2 | 16);
593         do_test_monitor_temporary_update_fail(3 | 16);
594         do_test_monitor_temporary_update_fail(2 | 8 | 16);
595         do_test_monitor_temporary_update_fail(3 | 8 | 16);
596 }
597
598 #[test]
599 fn test_monitor_update_fail_cs() {
600         // Tests handling of a monitor update failure when processing an incoming commitment_signed
601         let chanmon_cfgs = create_chanmon_cfgs(2);
602         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
603         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
604         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
605         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
606
607         let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
608         {
609                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
610                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
611                 check_added_monitors!(nodes[0], 1);
612         }
613
614         let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
615         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
616
617         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
618         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
619         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
620         check_added_monitors!(nodes[1], 1);
621         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
622
623         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
624         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
625         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
626         check_added_monitors!(nodes[1], 0);
627         let responses = nodes[1].node.get_and_clear_pending_msg_events();
628         assert_eq!(responses.len(), 2);
629
630         match responses[0] {
631                 MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
632                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
633                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
634                         check_added_monitors!(nodes[0], 1);
635                 },
636                 _ => panic!("Unexpected event"),
637         }
638         match responses[1] {
639                 MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
640                         assert!(updates.update_add_htlcs.is_empty());
641                         assert!(updates.update_fulfill_htlcs.is_empty());
642                         assert!(updates.update_fail_htlcs.is_empty());
643                         assert!(updates.update_fail_malformed_htlcs.is_empty());
644                         assert!(updates.update_fee.is_none());
645                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
646
647                         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
648                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
649                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
650                         check_added_monitors!(nodes[0], 1);
651                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
652                 },
653                 _ => panic!("Unexpected event"),
654         }
655
656         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
657         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
658         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
659         check_added_monitors!(nodes[0], 0);
660
661         let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
662         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
663         check_added_monitors!(nodes[1], 1);
664
665         expect_pending_htlcs_forwardable!(nodes[1]);
666
667         let events = nodes[1].node.get_and_clear_pending_events();
668         assert_eq!(events.len(), 1);
669         match events[0] {
670                 Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
671                         assert_eq!(payment_hash, our_payment_hash);
672                         assert_eq!(amount_msat, 1_000_000);
673                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
674                         assert_eq!(via_channel_id, Some(channel_id));
675                         match &purpose {
676                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
677                                         assert!(payment_preimage.is_none());
678                                         assert_eq!(our_payment_secret, *payment_secret);
679                                 },
680                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
681                         }
682                 },
683                 _ => panic!("Unexpected event"),
684         };
685
686         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
687 }
688
689 #[test]
690 fn test_monitor_update_fail_no_rebroadcast() {
691         // Tests handling of a monitor update failure when no message rebroadcasting on
692         // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
693         // fuzz tests.
694         let chanmon_cfgs = create_chanmon_cfgs(2);
695         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
696         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
697         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
698         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
699
700         let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
701         {
702                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
703                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
704                 check_added_monitors!(nodes[0], 1);
705         }
706
707         let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
708         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
709         let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
710
711         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
712         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
713         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
714         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
715         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
716         check_added_monitors!(nodes[1], 1);
717
718         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
719         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
720         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
721         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
722         check_added_monitors!(nodes[1], 0);
723         expect_pending_htlcs_forwardable!(nodes[1]);
724
725         let events = nodes[1].node.get_and_clear_pending_events();
726         assert_eq!(events.len(), 1);
727         match events[0] {
728                 Event::PaymentClaimable { payment_hash, .. } => {
729                         assert_eq!(payment_hash, our_payment_hash);
730                 },
731                 _ => panic!("Unexpected event"),
732         }
733
734         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
735 }
736
737 #[test]
738 fn test_monitor_update_raa_while_paused() {
739         // Tests handling of an RAA while monitor updating has already been marked failed.
740         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
741         let chanmon_cfgs = create_chanmon_cfgs(2);
742         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
743         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
744         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
745         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
746
747         send_payment(&nodes[0], &[&nodes[1]], 5000000);
748         let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
749         {
750                 nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
751                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
752                 check_added_monitors!(nodes[0], 1);
753         }
754         let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
755
756         let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
757         {
758                 nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
759                         RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
760                 check_added_monitors!(nodes[1], 1);
761         }
762         let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
763
764         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
765         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
766         check_added_monitors!(nodes[1], 1);
767         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
768
769         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
770         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
771         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
772         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
773         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
774         check_added_monitors!(nodes[0], 1);
775         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
776
777         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
778         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
779         check_added_monitors!(nodes[0], 1);
780
781         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
782         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
783         check_added_monitors!(nodes[0], 0);
784
785         let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
786         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
787         check_added_monitors!(nodes[1], 1);
788         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
789
790         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
791         check_added_monitors!(nodes[1], 1);
792         let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
793
794         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
795         check_added_monitors!(nodes[0], 1);
796         let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
797
798         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
799         check_added_monitors!(nodes[0], 1);
800         expect_pending_htlcs_forwardable!(nodes[0]);
801         expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
802
803         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
804         check_added_monitors!(nodes[1], 1);
805         expect_pending_htlcs_forwardable!(nodes[1]);
806         expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
807
808         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
809         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
810 }
811
812 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
813         // Tests handling of a monitor update failure when processing an incoming RAA
814         let chanmon_cfgs = create_chanmon_cfgs(3);
815         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
816         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
817         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
818         create_announced_chan_between_nodes(&nodes, 0, 1);
819         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
820
821         // Rebalance a bit so that we can send backwards from 2 to 1.
822         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
823
824         // Route a first payment that we'll fail backwards
825         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
826
827         // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
828         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
829         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
830         check_added_monitors!(nodes[2], 1);
831
832         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
833         assert!(updates.update_add_htlcs.is_empty());
834         assert!(updates.update_fulfill_htlcs.is_empty());
835         assert_eq!(updates.update_fail_htlcs.len(), 1);
836         assert!(updates.update_fail_malformed_htlcs.is_empty());
837         assert!(updates.update_fee.is_none());
838         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
839
840         let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
841         check_added_monitors!(nodes[0], 0);
842
843         // While the second channel is AwaitingRAA, forward a second payment to get it into the
844         // holding cell.
845         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
846         {
847                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
848                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
849                 check_added_monitors!(nodes[0], 1);
850         }
851
852         let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
853         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
854         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
855
856         expect_pending_htlcs_forwardable!(nodes[1]);
857         check_added_monitors!(nodes[1], 0);
858         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
859
860         // Now fail monitor updating.
861         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
862         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
863         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
864         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
865         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
866         check_added_monitors!(nodes[1], 1);
867
868         // Forward a third payment which will also be added to the holding cell, despite the channel
869         // being paused waiting a monitor update.
870         let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
871         {
872                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
873                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
874                 check_added_monitors!(nodes[0], 1);
875         }
876
877         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
878         send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
879         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
880         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
881         check_added_monitors!(nodes[1], 0);
882
883         // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
884         // and not forwarded.
885         expect_pending_htlcs_forwardable!(nodes[1]);
886         check_added_monitors!(nodes[1], 0);
887         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
888
889         let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
890                 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
891                 let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
892                 nodes[2].node.send_payment_with_route(&route, payment_hash_4,
893                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
894                 check_added_monitors!(nodes[2], 1);
895
896                 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
897                 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
898                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
899                 check_added_monitors!(nodes[1], 1);
900                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
901                 (Some(payment_preimage_4), Some(payment_hash_4))
902         } else { (None, None) };
903
904         // Restore monitor updating, ensuring we immediately get a fail-back update and a
905         // update_add update.
906         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
907         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
908         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
909         check_added_monitors!(nodes[1], 0);
910         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
911         check_added_monitors!(nodes[1], 1);
912
913         let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
914         if test_ignore_second_cs {
915                 assert_eq!(events_3.len(), 3);
916         } else {
917                 assert_eq!(events_3.len(), 2);
918         }
919
920         // Note that the ordering of the events for different nodes is non-prescriptive, though the
921         // ordering of the two events that both go to nodes[2] have to stay in the same order.
922         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
923         let messages_a = match nodes_0_event {
924                 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
925                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
926                         assert!(updates.update_fulfill_htlcs.is_empty());
927                         assert_eq!(updates.update_fail_htlcs.len(), 1);
928                         assert!(updates.update_fail_malformed_htlcs.is_empty());
929                         assert!(updates.update_add_htlcs.is_empty());
930                         assert!(updates.update_fee.is_none());
931                         (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
932                 },
933                 _ => panic!("Unexpected event type!"),
934         };
935
936         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
937         let send_event_b = SendEvent::from_event(nodes_2_event);
938         assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
939
940         let raa = if test_ignore_second_cs {
941                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
942                 match nodes_2_event {
943                         MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
944                                 assert_eq!(node_id, nodes[2].node.get_our_node_id());
945                                 Some(msg.clone())
946                         },
947                         _ => panic!("Unexpected event"),
948                 }
949         } else { None };
950
951         // Now deliver the new messages...
952
953         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
954         commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
955         expect_payment_failed!(nodes[0], payment_hash_1, true);
956
957         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
958         let as_cs;
959         if test_ignore_second_cs {
960                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
961                 check_added_monitors!(nodes[2], 1);
962                 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
963                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
964                 check_added_monitors!(nodes[2], 1);
965                 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
966                 assert!(bs_cs.update_add_htlcs.is_empty());
967                 assert!(bs_cs.update_fail_htlcs.is_empty());
968                 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
969                 assert!(bs_cs.update_fulfill_htlcs.is_empty());
970                 assert!(bs_cs.update_fee.is_none());
971
972                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
973                 check_added_monitors!(nodes[1], 1);
974                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
975
976                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
977                 check_added_monitors!(nodes[1], 1);
978         } else {
979                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
980                 check_added_monitors!(nodes[2], 1);
981
982                 let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
983                 // As both messages are for nodes[1], they're in order.
984                 assert_eq!(bs_revoke_and_commit.len(), 2);
985                 match bs_revoke_and_commit[0] {
986                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
987                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
988                                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
989                                 check_added_monitors!(nodes[1], 1);
990                         },
991                         _ => panic!("Unexpected event"),
992                 }
993
994                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
995
996                 match bs_revoke_and_commit[1] {
997                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
998                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
999                                 assert!(updates.update_add_htlcs.is_empty());
1000                                 assert!(updates.update_fail_htlcs.is_empty());
1001                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
1002                                 assert!(updates.update_fulfill_htlcs.is_empty());
1003                                 assert!(updates.update_fee.is_none());
1004                                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1005                                 check_added_monitors!(nodes[1], 1);
1006                         },
1007                         _ => panic!("Unexpected event"),
1008                 }
1009         }
1010
1011         assert_eq!(as_cs.update_add_htlcs.len(), 1);
1012         assert!(as_cs.update_fail_htlcs.is_empty());
1013         assert!(as_cs.update_fail_malformed_htlcs.is_empty());
1014         assert!(as_cs.update_fulfill_htlcs.is_empty());
1015         assert!(as_cs.update_fee.is_none());
1016         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1017
1018
1019         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
1020         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
1021         check_added_monitors!(nodes[2], 1);
1022         let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1023
1024         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1025         check_added_monitors!(nodes[2], 1);
1026         let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1027
1028         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
1029         check_added_monitors!(nodes[1], 1);
1030         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1031
1032         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1033         check_added_monitors!(nodes[1], 1);
1034         let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1035
1036         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1037         check_added_monitors!(nodes[2], 1);
1038         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1039
1040         expect_pending_htlcs_forwardable!(nodes[2]);
1041
1042         let events_6 = nodes[2].node.get_and_clear_pending_events();
1043         assert_eq!(events_6.len(), 2);
1044         match events_6[0] {
1045                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
1046                 _ => panic!("Unexpected event"),
1047         };
1048         match events_6[1] {
1049                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1050                 _ => panic!("Unexpected event"),
1051         };
1052
1053         if test_ignore_second_cs {
1054                 expect_pending_htlcs_forwardable!(nodes[1]);
1055                 check_added_monitors!(nodes[1], 1);
1056
1057                 send_event = SendEvent::from_node(&nodes[1]);
1058                 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
1059                 assert_eq!(send_event.msgs.len(), 1);
1060                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
1061                 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
1062
1063                 expect_pending_htlcs_forwardable!(nodes[0]);
1064
1065                 let events_9 = nodes[0].node.get_and_clear_pending_events();
1066                 assert_eq!(events_9.len(), 1);
1067                 match events_9[0] {
1068                         Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
1069                         _ => panic!("Unexpected event"),
1070                 };
1071                 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
1072         }
1073
1074         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
1075 }
1076
1077 #[test]
1078 fn test_monitor_update_fail_raa() {
1079         do_test_monitor_update_fail_raa(false);
1080         do_test_monitor_update_fail_raa(true);
1081 }
1082
1083 #[test]
1084 fn test_monitor_update_fail_reestablish() {
1085         // Simple test for message retransmission after monitor update failure on
1086         // channel_reestablish generating a monitor update (which comes from freeing holding cell
1087         // HTLCs).
1088         let chanmon_cfgs = create_chanmon_cfgs(3);
1089         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1090         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1091         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1092         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1093         create_announced_chan_between_nodes(&nodes, 1, 2);
1094
1095         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
1096
1097         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1098         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1099
1100         nodes[2].node.claim_funds(payment_preimage);
1101         check_added_monitors!(nodes[2], 1);
1102         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
1103
1104         let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1105         assert!(updates.update_add_htlcs.is_empty());
1106         assert!(updates.update_fail_htlcs.is_empty());
1107         assert!(updates.update_fail_malformed_htlcs.is_empty());
1108         assert!(updates.update_fee.is_none());
1109         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1110         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1111         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
1112         check_added_monitors!(nodes[1], 1);
1113         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1114         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1115
1116         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1117         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1118                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1119         }, true).unwrap();
1120         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1121                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1122         }, false).unwrap();
1123
1124         let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1125         let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1126
1127         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1128
1129         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1130         assert_eq!(
1131                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1132                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1133
1134         nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
1135         check_added_monitors!(nodes[1], 1);
1136
1137         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1138         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1139
1140         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1141                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1142         }, true).unwrap();
1143         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1144                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1145         }, false).unwrap();
1146
1147         assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
1148         assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
1149
1150         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1151         assert_eq!(
1152                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1153                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1154
1155         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1156         check_added_monitors!(nodes[1], 0);
1157         assert_eq!(
1158                 get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
1159                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1160
1161         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1162         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1163         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1164         check_added_monitors!(nodes[1], 0);
1165
1166         updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1167         assert!(updates.update_add_htlcs.is_empty());
1168         assert!(updates.update_fail_htlcs.is_empty());
1169         assert!(updates.update_fail_malformed_htlcs.is_empty());
1170         assert!(updates.update_fee.is_none());
1171         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1172         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1173         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1174         expect_payment_sent!(nodes[0], payment_preimage);
1175 }
1176
1177 #[test]
1178 fn raa_no_response_awaiting_raa_state() {
1179         // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1180         // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1181         // in question (assuming it intends to respond with a CS after monitor updating is restored).
1182         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1183         let chanmon_cfgs = create_chanmon_cfgs(2);
1184         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1185         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1186         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1187         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1188
1189         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1190         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1191         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
1192
1193         // Queue up two payments - one will be delivered right away, one immediately goes into the
1194         // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1195         // immediately after a CS. By setting failing the monitor update failure from the CS (which
1196         // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1197         // generation during RAA while in monitor-update-failed state.
1198         {
1199                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1200                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1201                 check_added_monitors!(nodes[0], 1);
1202                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1203                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1204                 check_added_monitors!(nodes[0], 0);
1205         }
1206
1207         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1208         assert_eq!(events.len(), 1);
1209         let payment_event = SendEvent::from_event(events.pop().unwrap());
1210         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1211         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1212         check_added_monitors!(nodes[1], 1);
1213
1214         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1215         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1216         check_added_monitors!(nodes[0], 1);
1217         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1218         assert_eq!(events.len(), 1);
1219         let payment_event = SendEvent::from_event(events.pop().unwrap());
1220
1221         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1222         check_added_monitors!(nodes[0], 1);
1223         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1224
1225         // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1226         // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1227         // then restore channel monitor updates.
1228         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1229         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1230         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1231         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1232         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1233         check_added_monitors!(nodes[1], 1);
1234         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1235
1236         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1237         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1238         check_added_monitors!(nodes[1], 1);
1239
1240         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1241         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1242         // nodes[1] should be AwaitingRAA here!
1243         check_added_monitors!(nodes[1], 0);
1244         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1245         expect_pending_htlcs_forwardable!(nodes[1]);
1246         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1247
1248         // We send a third payment here, which is somewhat of a redundant test, but the
1249         // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1250         // commitment transaction states) whereas here we can explicitly check for it.
1251         {
1252                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
1253                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1254                 check_added_monitors!(nodes[0], 0);
1255                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1256         }
1257         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1258         check_added_monitors!(nodes[0], 1);
1259         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1260         assert_eq!(events.len(), 1);
1261         let payment_event = SendEvent::from_event(events.pop().unwrap());
1262
1263         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1264         check_added_monitors!(nodes[0], 1);
1265         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1266
1267         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1268         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1269         check_added_monitors!(nodes[1], 1);
1270         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1271
1272         // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1273         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1274         check_added_monitors!(nodes[1], 1);
1275         expect_pending_htlcs_forwardable!(nodes[1]);
1276         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1277         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1278
1279         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1280         check_added_monitors!(nodes[0], 1);
1281
1282         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1283         check_added_monitors!(nodes[0], 1);
1284         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1285
1286         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1287         check_added_monitors!(nodes[1], 1);
1288         expect_pending_htlcs_forwardable!(nodes[1]);
1289         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
1290
1291         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1292         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1293         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
1294 }
1295
1296 #[test]
1297 fn claim_while_disconnected_monitor_update_fail() {
1298         // Test for claiming a payment while disconnected and then having the resulting
1299         // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1300         // contrived case for nodes with network instability.
1301         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1302         // code introduced a regression in this test (specifically, this caught a removal of the
1303         // channel_reestablish handling ensuring the order was sensical given the messages used).
1304         let chanmon_cfgs = create_chanmon_cfgs(2);
1305         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1306         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1307         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1308         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1309
1310         // Forward a payment for B to claim
1311         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1312
1313         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1314         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1315
1316         nodes[1].node.claim_funds(payment_preimage_1);
1317         check_added_monitors!(nodes[1], 1);
1318         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1319
1320         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1321                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1322         }, true).unwrap();
1323         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1324                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1325         }, false).unwrap();
1326
1327         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1328         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1329
1330         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1331         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1332
1333         // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
1334         // update.
1335         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1336
1337         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1338         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1339         check_added_monitors!(nodes[1], 1);
1340         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1341
1342         // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1343         // the monitor still failed
1344         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1345         {
1346                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1347                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1348                 check_added_monitors!(nodes[0], 1);
1349         }
1350
1351         let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1352         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1353         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1354         check_added_monitors!(nodes[1], 1);
1355         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1356         // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
1357         // until we've channel_monitor_update'd and updated for the new commitment transaction.
1358
1359         // Now un-fail the monitor, which will result in B sending its original commitment update,
1360         // receiving the commitment update from A, and the resulting commitment dances.
1361         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1362         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1363         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1364         check_added_monitors!(nodes[1], 0);
1365
1366         let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1367         assert_eq!(bs_msgs.len(), 2);
1368
1369         match bs_msgs[0] {
1370                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1371                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1372                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1373                         expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
1374                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1375                         check_added_monitors!(nodes[0], 1);
1376
1377                         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1378                         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1379                         check_added_monitors!(nodes[1], 1);
1380                 },
1381                 _ => panic!("Unexpected event"),
1382         }
1383
1384         match bs_msgs[1] {
1385                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1386                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1387                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1388                         check_added_monitors!(nodes[0], 1);
1389                 },
1390                 _ => panic!("Unexpected event"),
1391         }
1392
1393         let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1394
1395         let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1396         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1397         check_added_monitors!(nodes[0], 1);
1398         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1399
1400         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1401         check_added_monitors!(nodes[1], 1);
1402         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1403         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1404         check_added_monitors!(nodes[1], 1);
1405
1406         expect_pending_htlcs_forwardable!(nodes[1]);
1407         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1408
1409         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1410         check_added_monitors!(nodes[0], 1);
1411         expect_payment_path_successful!(nodes[0]);
1412
1413         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1414 }
1415
1416 #[test]
1417 fn monitor_failed_no_reestablish_response() {
1418         // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1419         // response to a commitment_signed.
1420         // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1421         // debug_assert!() failure in channel_reestablish handling.
1422         let chanmon_cfgs = create_chanmon_cfgs(2);
1423         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1424         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1425         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1426         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1427         {
1428                 let mut node_0_per_peer_lock;
1429                 let mut node_0_peer_state_lock;
1430                 get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1431         }
1432         {
1433                 let mut node_1_per_peer_lock;
1434                 let mut node_1_peer_state_lock;
1435                 get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1436         }
1437
1438         // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1439         // on receipt).
1440         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1441         {
1442                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1443                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1444                 check_added_monitors!(nodes[0], 1);
1445         }
1446
1447         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1448         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1449         assert_eq!(events.len(), 1);
1450         let payment_event = SendEvent::from_event(events.pop().unwrap());
1451         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1452         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1453         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1454         check_added_monitors!(nodes[1], 1);
1455
1456         // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1457         // is still failing to update monitors.
1458         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1459         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1460
1461         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1462                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1463         }, true).unwrap();
1464         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1465                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1466         }, false).unwrap();
1467
1468         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1469         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1470
1471         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1472         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1473         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1474         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1475
1476         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1477         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1478         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1479         check_added_monitors!(nodes[1], 0);
1480         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1481
1482         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1483         check_added_monitors!(nodes[0], 1);
1484         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1485         check_added_monitors!(nodes[0], 1);
1486
1487         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1488         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1489         check_added_monitors!(nodes[1], 1);
1490
1491         expect_pending_htlcs_forwardable!(nodes[1]);
1492         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1493
1494         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1495 }
1496
1497 #[test]
1498 fn first_message_on_recv_ordering() {
1499         // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1500         // messages, we're willing to flip the order of response messages if neccessary in resposne to
1501         // a commitment_signed which needs to send an RAA first.
1502         // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1503         // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1504         // response. To do this, we start routing two payments, with the final RAA for the first being
1505         // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1506         // have no pending response but will want to send a RAA/CS (with the updates for the second
1507         // payment applied).
1508         // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1509         let chanmon_cfgs = create_chanmon_cfgs(2);
1510         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1511         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1512         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1513         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1514
1515         // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1516         // can deliver it and fail the monitor update.
1517         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1518         {
1519                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1520                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1521                 check_added_monitors!(nodes[0], 1);
1522         }
1523
1524         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1525         assert_eq!(events.len(), 1);
1526         let payment_event = SendEvent::from_event(events.pop().unwrap());
1527         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1528         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1529         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1530         check_added_monitors!(nodes[1], 1);
1531         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1532
1533         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1534         check_added_monitors!(nodes[0], 1);
1535         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1536         check_added_monitors!(nodes[0], 1);
1537
1538         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1539
1540         // Route the second payment, generating an update_add_htlc/commitment_signed
1541         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1542         {
1543                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1544                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1545                 check_added_monitors!(nodes[0], 1);
1546         }
1547         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1548         assert_eq!(events.len(), 1);
1549         let payment_event = SendEvent::from_event(events.pop().unwrap());
1550         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1551
1552         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1553
1554         // Deliver the final RAA for the first payment, which does not require a response. RAAs
1555         // generally require a commitment_signed, so the fact that we're expecting an opposite response
1556         // to the next message also tests resetting the delivery order.
1557         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1558         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1559         check_added_monitors!(nodes[1], 1);
1560
1561         // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1562         // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1563         // appropriate HTLC acceptance).
1564         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1565         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1566         check_added_monitors!(nodes[1], 1);
1567         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1568
1569         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1570         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1571         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1572         check_added_monitors!(nodes[1], 0);
1573
1574         expect_pending_htlcs_forwardable!(nodes[1]);
1575         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1576
1577         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1578         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1579         check_added_monitors!(nodes[0], 1);
1580         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1581         check_added_monitors!(nodes[0], 1);
1582
1583         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1584         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1585         check_added_monitors!(nodes[1], 1);
1586
1587         expect_pending_htlcs_forwardable!(nodes[1]);
1588         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1589
1590         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1591         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1592 }
1593
1594 #[test]
1595 fn test_monitor_update_fail_claim() {
1596         // Basic test for monitor update failures when processing claim_funds calls.
1597         // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1598         // update to claim the payment. We then send two payments C->B->A, which are held at B.
1599         // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1600         // the payments from C onwards to A.
1601         let chanmon_cfgs = create_chanmon_cfgs(3);
1602         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1603         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1604         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1605         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1606         create_announced_chan_between_nodes(&nodes, 1, 2);
1607
1608         // Rebalance a bit so that we can send backwards from 3 to 2.
1609         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1610
1611         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1612
1613         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1614         nodes[1].node.claim_funds(payment_preimage_1);
1615         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1616         check_added_monitors!(nodes[1], 1);
1617
1618         // Note that at this point there is a pending commitment transaction update for A being held by
1619         // B. Even when we go to send the payment from C through B to A, B will not update this
1620         // already-signed commitment transaction and will instead wait for it to resolve before
1621         // forwarding the payment onwards.
1622
1623         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
1624         {
1625                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1626                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1627                 check_added_monitors!(nodes[2], 1);
1628         }
1629
1630         // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1631         // paused, so forward shouldn't succeed until we call channel_monitor_updated().
1632         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1633
1634         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1635         assert_eq!(events.len(), 1);
1636         let payment_event = SendEvent::from_event(events.pop().unwrap());
1637         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1638         let events = nodes[1].node.get_and_clear_pending_msg_events();
1639         assert_eq!(events.len(), 0);
1640         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1641         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1642
1643         let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1644         nodes[2].node.send_payment_with_route(&route, payment_hash_3,
1645                 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1646         check_added_monitors!(nodes[2], 1);
1647
1648         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1649         assert_eq!(events.len(), 1);
1650         let payment_event = SendEvent::from_event(events.pop().unwrap());
1651         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1652         let events = nodes[1].node.get_and_clear_pending_msg_events();
1653         assert_eq!(events.len(), 0);
1654         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1655
1656         // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1657         let channel_id = chan_1.2;
1658         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1659         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1660         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1661         check_added_monitors!(nodes[1], 0);
1662
1663         let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1664         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1665         commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1666         expect_payment_sent!(nodes[0], payment_preimage_1);
1667
1668         // Get the payment forwards, note that they were batched into one commitment update.
1669         nodes[1].node.process_pending_htlc_forwards();
1670         check_added_monitors!(nodes[1], 1);
1671         let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1672         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1673         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1674         commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1675         expect_pending_htlcs_forwardable!(nodes[0]);
1676
1677         let events = nodes[0].node.get_and_clear_pending_events();
1678         assert_eq!(events.len(), 2);
1679         match events[0] {
1680                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
1681                         assert_eq!(payment_hash_2, *payment_hash);
1682                         assert_eq!(1_000_000, amount_msat);
1683                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1684                         assert_eq!(via_channel_id, Some(channel_id));
1685                         assert_eq!(via_user_channel_id, Some(42));
1686                         match &purpose {
1687                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
1688                                         assert!(payment_preimage.is_none());
1689                                         assert_eq!(payment_secret_2, *payment_secret);
1690                                 },
1691                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
1692                         }
1693                 },
1694                 _ => panic!("Unexpected event"),
1695         }
1696         match events[1] {
1697                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
1698                         assert_eq!(payment_hash_3, *payment_hash);
1699                         assert_eq!(1_000_000, amount_msat);
1700                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1701                         assert_eq!(via_channel_id, Some(channel_id));
1702                         match &purpose {
1703                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
1704                                         assert!(payment_preimage.is_none());
1705                                         assert_eq!(payment_secret_3, *payment_secret);
1706                                 },
1707                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
1708                         }
1709                 },
1710                 _ => panic!("Unexpected event"),
1711         }
1712 }
1713
1714 #[test]
1715 fn test_monitor_update_on_pending_forwards() {
1716         // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1717         // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1718         // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1719         // from C to A will be pending a forward to A.
1720         let chanmon_cfgs = create_chanmon_cfgs(3);
1721         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1722         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1723         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1724         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1725         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1726
1727         // Rebalance a bit so that we can send backwards from 3 to 1.
1728         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1729
1730         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1731         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
1732         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
1733         check_added_monitors!(nodes[2], 1);
1734
1735         let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1736         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1737         commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1738         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1739
1740         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
1741         {
1742                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1743                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1744                 check_added_monitors!(nodes[2], 1);
1745         }
1746
1747         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1748         assert_eq!(events.len(), 1);
1749         let payment_event = SendEvent::from_event(events.pop().unwrap());
1750         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1751         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
1752
1753         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1754         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1755         check_added_monitors!(nodes[1], 1);
1756
1757         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1758         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1759         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1760         check_added_monitors!(nodes[1], 0);
1761
1762         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1763         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1764         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1765         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1766
1767         let events = nodes[0].node.get_and_clear_pending_events();
1768         assert_eq!(events.len(), 3);
1769         if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
1770                 assert_eq!(payment_hash, payment_hash_1);
1771                 assert!(payment_failed_permanently);
1772         } else { panic!("Unexpected event!"); }
1773         match events[2] {
1774                 Event::PaymentFailed { payment_hash, .. } => {
1775                         assert_eq!(payment_hash, payment_hash_1);
1776                 },
1777                 _ => panic!("Unexpected event"),
1778         }
1779         match events[0] {
1780                 Event::PendingHTLCsForwardable { .. } => { },
1781                 _ => panic!("Unexpected event"),
1782         };
1783         nodes[0].node.process_pending_htlc_forwards();
1784         expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1785
1786         claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
1787 }
1788
1789 #[test]
1790 fn monitor_update_claim_fail_no_response() {
1791         // Test for claim_funds resulting in both a monitor update failure and no message response (due
1792         // to channel being AwaitingRAA).
1793         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1794         // code was broken.
1795         let chanmon_cfgs = create_chanmon_cfgs(2);
1796         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1797         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1798         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1799         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1800
1801         // Forward a payment for B to claim
1802         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1803
1804         // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1805         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1806         {
1807                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1808                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1809                 check_added_monitors!(nodes[0], 1);
1810         }
1811
1812         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1813         assert_eq!(events.len(), 1);
1814         let payment_event = SendEvent::from_event(events.pop().unwrap());
1815         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1816         let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
1817
1818         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1819         nodes[1].node.claim_funds(payment_preimage_1);
1820         check_added_monitors!(nodes[1], 1);
1821
1822         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1823
1824         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1825         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1826         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1827         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1828         check_added_monitors!(nodes[1], 0);
1829         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1830
1831         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1832         check_added_monitors!(nodes[1], 1);
1833         expect_pending_htlcs_forwardable!(nodes[1]);
1834         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1835
1836         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1837         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1838         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1839         expect_payment_sent!(nodes[0], payment_preimage_1);
1840
1841         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1842 }
1843
1844 // restore_b_before_conf has no meaning if !confirm_a_first
1845 // restore_b_before_lock has no meaning if confirm_a_first
1846 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1847         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1848         // the channel setup happily after the update is restored.
1849         let chanmon_cfgs = create_chanmon_cfgs(2);
1850         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1851         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1852         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1853
1854         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
1855         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1856         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1857
1858         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1859
1860         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1861         check_added_monitors!(nodes[0], 0);
1862
1863         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1864         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1865         let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
1866         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1867         check_added_monitors!(nodes[1], 1);
1868
1869         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1870         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1871         check_added_monitors!(nodes[0], 1);
1872         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1873         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1874         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1875         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1876         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1877         check_added_monitors!(nodes[0], 0);
1878         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
1879
1880         let events = nodes[0].node.get_and_clear_pending_events();
1881         assert_eq!(events.len(), 0);
1882         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1883         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1884
1885         if confirm_a_first {
1886                 confirm_transaction(&nodes[0], &funding_tx);
1887                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1888                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1889                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1890         } else {
1891                 assert!(!restore_b_before_conf);
1892                 confirm_transaction(&nodes[1], &funding_tx);
1893                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1894         }
1895
1896         // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1897         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1898         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1899         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
1900         reconnect_args.send_channel_ready.1 = confirm_a_first;
1901         reconnect_nodes(reconnect_args);
1902
1903         // But we want to re-emit ChannelPending
1904         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
1905         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1906         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1907
1908         if !restore_b_before_conf {
1909                 confirm_transaction(&nodes[1], &funding_tx);
1910                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1911                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1912         }
1913         if !confirm_a_first && !restore_b_before_lock {
1914                 confirm_transaction(&nodes[0], &funding_tx);
1915                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1916                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1917                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1918         }
1919
1920         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1921         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1922         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1923         check_added_monitors!(nodes[1], 0);
1924
1925         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1926                 if !restore_b_before_lock {
1927                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1928                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1929                 } else {
1930                         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1931                         confirm_transaction(&nodes[0], &funding_tx);
1932                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1933                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
1934                 }
1935         } else {
1936                 if restore_b_before_conf {
1937                         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1938                         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1939                         confirm_transaction(&nodes[1], &funding_tx);
1940                 }
1941                 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1942                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1943         };
1944         for node in nodes.iter() {
1945                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1946                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1947                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
1948         }
1949
1950         if !restore_b_before_lock {
1951                 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
1952         } else {
1953                 expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
1954         }
1955
1956
1957         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1958         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1959         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1960         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
1961 }
1962
1963 #[test]
1964 fn during_funding_monitor_fail() {
1965         do_during_funding_monitor_fail(true, true, false);
1966         do_during_funding_monitor_fail(true, false, false);
1967         do_during_funding_monitor_fail(false, false, false);
1968         do_during_funding_monitor_fail(false, false, true);
1969 }
1970
1971 #[test]
1972 fn test_path_paused_mpp() {
1973         // Simple test of sending a multi-part payment where one path is currently blocked awaiting
1974         // monitor update
1975         let chanmon_cfgs = create_chanmon_cfgs(4);
1976         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1977         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1978         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1979
1980         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1981         let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
1982         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
1983         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
1984
1985         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
1986
1987         // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
1988         let path = route.paths[0].clone();
1989         route.paths.push(path);
1990         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
1991         route.paths[0].hops[0].short_channel_id = chan_1_id;
1992         route.paths[0].hops[1].short_channel_id = chan_3_id;
1993         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
1994         route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
1995         route.paths[1].hops[1].short_channel_id = chan_4_id;
1996
1997         // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
1998         // (for the path 0 -> 2 -> 3) fails.
1999         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2000         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2001
2002         // Now check that we get the right return value, indicating that the first path succeeded but
2003         // the second got a MonitorUpdateInProgress err. This implies
2004         // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
2005         if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
2006                 &route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
2007         ) {
2008                 assert_eq!(results.len(), 2);
2009                 if let Ok(()) = results[0] {} else { panic!(); }
2010                 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
2011         } else { panic!(); }
2012         check_added_monitors!(nodes[0], 2);
2013         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2014
2015         // Pass the first HTLC of the payment along to nodes[3].
2016         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2017         assert_eq!(events.len(), 1);
2018         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
2019
2020         // And check that, after we successfully update the monitor for chan_2 we can pass the second
2021         // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
2022         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
2023         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2024         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2025         assert_eq!(events.len(), 1);
2026         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
2027
2028         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
2029 }
2030
2031 #[test]
2032 fn test_pending_update_fee_ack_on_reconnect() {
2033         // In early versions of our automated fee update patch, nodes did not correctly use the
2034         // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2035         // undelivered commitment_signed.
2036         //
2037         // B sends A new HTLC + CS, not delivered
2038         // A sends B update_fee + CS
2039         // B receives the CS and sends RAA, previously causing B to lock in the new feerate
2040         // reconnect
2041         // B resends initial CS, using the original fee
2042
2043         let chanmon_cfgs = create_chanmon_cfgs(2);
2044         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2045         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2046         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2047
2048         create_announced_chan_between_nodes(&nodes, 0, 1);
2049         send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
2050
2051         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2052         nodes[1].node.send_payment_with_route(&route, payment_hash,
2053                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
2054         check_added_monitors!(nodes[1], 1);
2055         let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2056         // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
2057
2058         {
2059                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2060                 *feerate_lock *= 2;
2061         }
2062         nodes[0].node.timer_tick_occurred();
2063         check_added_monitors!(nodes[0], 1);
2064         let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2065         assert!(as_update_fee_msgs.update_fee.is_some());
2066
2067         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2068         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2069         check_added_monitors!(nodes[1], 1);
2070         let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2071         // bs_first_raa is not delivered until it is re-generated after reconnect
2072
2073         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2074         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2075
2076         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2077                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2078         }, true).unwrap();
2079         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2080         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2081                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2082         }, false).unwrap();
2083         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2084
2085         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2086         let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2087         assert_eq!(bs_resend_msgs.len(), 3);
2088         if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2089                 assert_eq!(*updates, bs_initial_send_msgs);
2090         } else { panic!(); }
2091         if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2092                 assert_eq!(*msg, bs_first_raa);
2093         } else { panic!(); }
2094         if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
2095
2096         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2097         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2098
2099         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2100         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2101         check_added_monitors!(nodes[0], 1);
2102         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2103         check_added_monitors!(nodes[1], 1);
2104         let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2105
2106         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2107         check_added_monitors!(nodes[0], 1);
2108         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2109         check_added_monitors!(nodes[1], 1);
2110         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2111
2112         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2113         check_added_monitors!(nodes[0], 1);
2114         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2115         check_added_monitors!(nodes[0], 1);
2116
2117         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2118         check_added_monitors!(nodes[1], 1);
2119
2120         expect_pending_htlcs_forwardable!(nodes[0]);
2121         expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2122
2123         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
2124 }
2125
2126 #[test]
2127 fn test_fail_htlc_on_broadcast_after_claim() {
2128         // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
2129         // channel backwards if we received an HTLC failure after a HTLC fulfillment. Here we test a
2130         // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2131         // HTLC was not included in a confirmed commitment transaction.
2132         //
2133         // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2134         // channel immediately before commitment occurs. After the commitment transaction reaches
2135         // ANTI_REORG_DELAY confirmations, will will try to fail the HTLC which was already fulfilled.
2136         let chanmon_cfgs = create_chanmon_cfgs(3);
2137         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2138         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2139         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2140
2141         create_announced_chan_between_nodes(&nodes, 0, 1);
2142         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2143
2144         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2145
2146         let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2147         assert_eq!(bs_txn.len(), 1);
2148
2149         nodes[2].node.claim_funds(payment_preimage);
2150         check_added_monitors!(nodes[2], 1);
2151         expect_payment_claimed!(nodes[2], payment_hash, 2000);
2152
2153         let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2154         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2155         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2156         check_added_monitors!(nodes[1], 1);
2157         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2158
2159         mine_transaction(&nodes[1], &bs_txn[0]);
2160         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2161         check_closed_broadcast!(nodes[1], true);
2162         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2163         check_added_monitors!(nodes[1], 1);
2164         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2165
2166         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2167         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2168         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2169         expect_payment_path_successful!(nodes[0]);
2170 }
2171
2172 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2173         // In early versions we did not handle resending of update_fee on reconnect correctly. The
2174         // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2175         // explicitly here.
2176         let chanmon_cfgs = create_chanmon_cfgs(2);
2177         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2178         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2179         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2180
2181         create_announced_chan_between_nodes(&nodes, 0, 1);
2182         send_payment(&nodes[0], &[&nodes[1]], 1000);
2183
2184         {
2185                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2186                 *feerate_lock += 20;
2187         }
2188         nodes[0].node.timer_tick_occurred();
2189         check_added_monitors!(nodes[0], 1);
2190         let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2191         assert!(update_msgs.update_fee.is_some());
2192         if deliver_update {
2193                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2194         }
2195
2196         if parallel_updates {
2197                 {
2198                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2199                         *feerate_lock += 20;
2200                 }
2201                 nodes[0].node.timer_tick_occurred();
2202                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2203         }
2204
2205         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2206         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2207
2208         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2209                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2210         }, true).unwrap();
2211         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2212         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2213                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2214         }, false).unwrap();
2215         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2216
2217         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2218         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2219         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2220
2221         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2222         let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2223         assert_eq!(as_reconnect_msgs.len(), 2);
2224         if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2225         let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2226                 { updates } else { panic!(); };
2227         assert!(update_msgs.update_fee.is_some());
2228         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2229         if parallel_updates {
2230                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2231                 check_added_monitors!(nodes[1], 1);
2232                 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2233                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2234                 check_added_monitors!(nodes[0], 1);
2235                 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2236
2237                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2238                 check_added_monitors!(nodes[0], 1);
2239                 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2240
2241                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2242                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2243                 check_added_monitors!(nodes[1], 1);
2244                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2245
2246                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2247                 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2248                 check_added_monitors!(nodes[1], 1);
2249
2250                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2251                 check_added_monitors!(nodes[0], 1);
2252
2253                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2254                 check_added_monitors!(nodes[0], 1);
2255                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2256
2257                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2258                 check_added_monitors!(nodes[1], 1);
2259         } else {
2260                 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2261         }
2262
2263         send_payment(&nodes[0], &[&nodes[1]], 1000);
2264 }
2265 #[test]
2266 fn update_fee_resend_test() {
2267         do_update_fee_resend_test(false, false);
2268         do_update_fee_resend_test(true, false);
2269         do_update_fee_resend_test(false, true);
2270         do_update_fee_resend_test(true, true);
2271 }
2272
2273 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2274         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2275         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2276         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2277         // which failed in such a case).
2278         let chanmon_cfgs = create_chanmon_cfgs(2);
2279         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2280         let persister;
2281         let new_chain_monitor;
2282         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2283         let nodes_0_deserialized;
2284         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2285
2286         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
2287         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2288         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2289
2290         // Do a really complicated dance to get an HTLC into the holding cell, with
2291         // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2292         // attempts to send an HTLC while MonitorUpdateInProgress is set are immediately
2293         // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2294         // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2295         // flags.
2296         //
2297         // We do this by:
2298         //  a) routing a payment from node B to node A,
2299         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2300         //     putting node A in AwaitingRemoteRevoke,
2301         //  c) sending a second payment from node A to node B, which is immediately placed in the
2302         //     holding cell,
2303         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2304         //     when we try to persist the payment preimage,
2305         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2306         //     clearing AwaitingRemoteRevoke on node A.
2307         //
2308         // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2309         // (c) will not be freed from the holding cell.
2310         let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
2311
2312         nodes[0].node.send_payment_with_route(&route, payment_hash_1,
2313                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2314         check_added_monitors!(nodes[0], 1);
2315         let send = SendEvent::from_node(&nodes[0]);
2316         assert_eq!(send.msgs.len(), 1);
2317
2318         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
2319                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2320         check_added_monitors!(nodes[0], 0);
2321
2322         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2323         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2324         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2325         nodes[0].node.claim_funds(payment_preimage_0);
2326         check_added_monitors!(nodes[0], 1);
2327
2328         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2329         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2330         check_added_monitors!(nodes[1], 1);
2331
2332         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2333
2334         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2335         check_added_monitors!(nodes[0], 1);
2336
2337         if disconnect {
2338                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2339                 // disconnect the peers. Note that the fuzzer originally found this issue because
2340                 // deserializing a ChannelManager in this state causes an assertion failure.
2341                 if reload_a {
2342                         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2343                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2344                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2345                 } else {
2346                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2347                 }
2348                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2349
2350                 // Now reconnect the two
2351                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2352                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2353                 }, true).unwrap();
2354                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2355                 assert_eq!(reestablish_1.len(), 1);
2356                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2357                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2358                 }, false).unwrap();
2359                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2360                 assert_eq!(reestablish_2.len(), 1);
2361
2362                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2363                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2364                 check_added_monitors!(nodes[1], 0);
2365
2366                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2367                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2368
2369                 assert!(resp_0.0.is_none());
2370                 assert!(resp_0.1.is_none());
2371                 assert!(resp_0.2.is_none());
2372                 assert!(resp_1.0.is_none());
2373                 assert!(resp_1.1.is_none());
2374
2375                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2376                 // moment).
2377                 if let Some(pending_cs) = resp_1.2 {
2378                         assert!(pending_cs.update_add_htlcs.is_empty());
2379                         assert!(pending_cs.update_fail_htlcs.is_empty());
2380                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2381                         assert_eq!(pending_cs.commitment_signed, cs);
2382                 } else { panic!(); }
2383
2384                 if reload_a {
2385                         // The two pending monitor updates were replayed (but are still pending).
2386                         check_added_monitors(&nodes[0], 2);
2387                 } else {
2388                         // There should be no monitor updates as we are still pending awaiting a failed one.
2389                         check_added_monitors(&nodes[0], 0);
2390                 }
2391                 check_added_monitors(&nodes[1], 0);
2392         }
2393
2394         // If we finish updating the monitor, we should free the holding cell right away (this did
2395         // not occur prior to #756).
2396         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2397         let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2398         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2399         expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2400
2401         // New outbound messages should be generated immediately upon a call to
2402         // get_and_clear_pending_msg_events (but not before).
2403         check_added_monitors!(nodes[0], 0);
2404         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2405         check_added_monitors!(nodes[0], 1);
2406         assert_eq!(events.len(), 1);
2407
2408         // Deliver the pending in-flight CS
2409         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2410         check_added_monitors!(nodes[0], 1);
2411
2412         let commitment_msg = match events.pop().unwrap() {
2413                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2414                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2415                         assert!(updates.update_fail_htlcs.is_empty());
2416                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2417                         assert!(updates.update_fee.is_none());
2418                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2419                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2420                         expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
2421                         assert_eq!(updates.update_add_htlcs.len(), 1);
2422                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2423                         updates.commitment_signed
2424                 },
2425                 _ => panic!("Unexpected event type!"),
2426         };
2427
2428         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2429         check_added_monitors!(nodes[1], 1);
2430
2431         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2432         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2433         expect_pending_htlcs_forwardable!(nodes[1]);
2434         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2435         check_added_monitors!(nodes[1], 1);
2436
2437         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);
2438
2439         let events = nodes[1].node.get_and_clear_pending_events();
2440         assert_eq!(events.len(), 2);
2441         match events[0] {
2442                 Event::PendingHTLCsForwardable { .. } => { },
2443                 _ => panic!("Unexpected event"),
2444         };
2445         match events[1] {
2446                 Event::PaymentPathSuccessful { .. } => { },
2447                 _ => panic!("Unexpected event"),
2448         };
2449
2450         nodes[1].node.process_pending_htlc_forwards();
2451         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2452
2453         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2454         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2455 }
2456 #[test]
2457 fn channel_holding_cell_serialize() {
2458         do_channel_holding_cell_serialize(true, true);
2459         do_channel_holding_cell_serialize(true, false);
2460         do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2461 }
2462
2463 #[derive(PartialEq)]
2464 enum HTLCStatusAtDupClaim {
2465         Received,
2466         HoldingCell,
2467         Cleared,
2468 }
2469 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2470         // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2471         // along the payment path before waiting for a full commitment_signed dance. This is great, but
2472         // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2473         // reconnects, and then has to re-send its update_fulfill_htlc message again.
2474         // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2475         // channel on which the inbound HTLC was received.
2476         let chanmon_cfgs = create_chanmon_cfgs(3);
2477         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2478         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2479         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2480
2481         create_announced_chan_between_nodes(&nodes, 0, 1);
2482         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2483
2484         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2485
2486         let mut as_raa = None;
2487         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2488                 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2489                 // awaiting a remote revoke_and_ack from nodes[0].
2490                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2491                 nodes[0].node.send_payment_with_route(&route, second_payment_hash,
2492                         RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2493                 check_added_monitors!(nodes[0], 1);
2494
2495                 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2496                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2497                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2498                 check_added_monitors!(nodes[1], 1);
2499
2500                 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2501                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2502                 check_added_monitors!(nodes[0], 1);
2503                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2504                 check_added_monitors!(nodes[0], 1);
2505
2506                 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2507         }
2508
2509         let fulfill_msg = msgs::UpdateFulfillHTLC {
2510                 channel_id: chan_id_2,
2511                 htlc_id: 0,
2512                 payment_preimage,
2513         };
2514         if second_fails {
2515                 nodes[2].node.fail_htlc_backwards(&payment_hash);
2516                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2517                 check_added_monitors!(nodes[2], 1);
2518                 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2519         } else {
2520                 nodes[2].node.claim_funds(payment_preimage);
2521                 check_added_monitors!(nodes[2], 1);
2522                 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2523
2524                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2525                 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2526                 // Check that the message we're about to deliver matches the one generated:
2527                 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2528         }
2529         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2530         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2531         check_added_monitors!(nodes[1], 1);
2532
2533         let mut bs_updates = None;
2534         if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2535                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2536                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2537                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2538                 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2539                 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2540                         commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2541                         expect_payment_path_successful!(nodes[0]);
2542                 }
2543         } else {
2544                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2545         }
2546
2547         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2548         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2549
2550         if second_fails {
2551                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2552                 reconnect_args.pending_htlc_fails.0 = 1;
2553                 reconnect_nodes(reconnect_args);
2554                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2555         } else {
2556                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2557                 reconnect_args.pending_htlc_claims.0 = 1;
2558                 reconnect_nodes(reconnect_args);
2559         }
2560
2561         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2562                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2563                 check_added_monitors!(nodes[1], 1);
2564                 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2565
2566                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2567                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2568                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2569                 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2570         }
2571         if htlc_status != HTLCStatusAtDupClaim::Cleared {
2572                 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2573                 expect_payment_path_successful!(nodes[0]);
2574         }
2575 }
2576
2577 #[test]
2578 fn test_reconnect_dup_htlc_claims() {
2579         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2580         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2581         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2582         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2583         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2584         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2585 }
2586
2587 #[test]
2588 fn test_temporary_error_during_shutdown() {
2589         // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2590         // close.
2591         let mut config = test_default_channel_config();
2592         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2593
2594         let chanmon_cfgs = create_chanmon_cfgs(2);
2595         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2596         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2597         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2598
2599         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2600
2601         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2602         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2603
2604         nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2605         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2606         check_added_monitors!(nodes[1], 1);
2607
2608         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2609         check_added_monitors!(nodes[0], 1);
2610
2611         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2612
2613         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2614         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2615
2616         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2617         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2618         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2619
2620         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2621
2622         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2623         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2624         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2625
2626         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
2627         let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
2628         let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2629
2630         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
2631         let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
2632         assert!(none_b.is_none());
2633         let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2634
2635         assert_eq!(txn_a, txn_b);
2636         assert_eq!(txn_a.len(), 1);
2637         check_spends!(txn_a[0], funding_tx);
2638         check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
2639         check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
2640 }
2641
2642 #[test]
2643 fn double_temp_error() {
2644         // Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
2645         let chanmon_cfgs = create_chanmon_cfgs(2);
2646         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2647         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2648         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2649
2650         let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
2651
2652         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2653         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2654
2655         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2656         // `claim_funds` results in a ChannelMonitorUpdate.
2657         nodes[1].node.claim_funds(payment_preimage_1);
2658         check_added_monitors!(nodes[1], 1);
2659         let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2660
2661         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2662         // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
2663         // which had some asserts that prevented it from being called twice.
2664         nodes[1].node.claim_funds(payment_preimage_2);
2665         check_added_monitors!(nodes[1], 1);
2666         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2667
2668         let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2669         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
2670         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2671         check_added_monitors!(nodes[1], 0);
2672         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
2673
2674         // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
2675         // and get both PaymentClaimed events at once.
2676         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2677
2678         let events = nodes[1].node.get_and_clear_pending_events();
2679         assert_eq!(events.len(), 2);
2680         match events[0] {
2681                 Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
2682                 _ => panic!("Unexpected Event: {:?}", events[0]),
2683         }
2684         match events[1] {
2685                 Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
2686                 _ => panic!("Unexpected Event: {:?}", events[1]),
2687         }
2688
2689         assert_eq!(msg_events.len(), 1);
2690         let (update_fulfill_1, commitment_signed_b1, node_id) = {
2691                 match &msg_events[0] {
2692                         &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
2693                                 assert!(update_add_htlcs.is_empty());
2694                                 assert_eq!(update_fulfill_htlcs.len(), 1);
2695                                 assert!(update_fail_htlcs.is_empty());
2696                                 assert!(update_fail_malformed_htlcs.is_empty());
2697                                 assert!(update_fee.is_none());
2698                                 (update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
2699                         },
2700                         _ => panic!("Unexpected event"),
2701                 }
2702         };
2703         assert_eq!(node_id, nodes[0].node.get_our_node_id());
2704         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
2705         check_added_monitors!(nodes[0], 0);
2706         expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2707         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
2708         check_added_monitors!(nodes[0], 1);
2709         nodes[0].node.process_pending_htlc_forwards();
2710         let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2711         check_added_monitors!(nodes[1], 0);
2712         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2713         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
2714         check_added_monitors!(nodes[1], 1);
2715         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
2716         check_added_monitors!(nodes[1], 1);
2717
2718         // Complete the second HTLC.
2719         let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
2720                 let events = nodes[1].node.get_and_clear_pending_msg_events();
2721                 assert_eq!(events.len(), 2);
2722                 (match &events[0] {
2723                         MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2724                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
2725                                 assert!(updates.update_add_htlcs.is_empty());
2726                                 assert!(updates.update_fail_htlcs.is_empty());
2727                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
2728                                 assert!(updates.update_fee.is_none());
2729                                 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2730                                 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
2731                         },
2732                         _ => panic!("Unexpected event"),
2733                 },
2734                  match events[1] {
2735                          MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
2736                                  assert_eq!(*node_id, nodes[0].node.get_our_node_id());
2737                                  (*msg).clone()
2738                          },
2739                          _ => panic!("Unexpected event"),
2740                  })
2741         };
2742         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
2743         check_added_monitors!(nodes[0], 1);
2744         expect_payment_path_successful!(nodes[0]);
2745
2746         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
2747         check_added_monitors!(nodes[0], 0);
2748         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2749         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
2750         expect_payment_sent!(nodes[0], payment_preimage_2);
2751 }
2752
2753 fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
2754         // Test that if the monitor update generated in funding_signed is stored async and we restart
2755         // with the latest ChannelManager but the ChannelMonitor persistence never completed we happily
2756         // drop the channel and move on.
2757         let chanmon_cfgs = create_chanmon_cfgs(2);
2758         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2759
2760         let persister;
2761         let new_chain_monitor;
2762
2763         let mut chan_config = test_default_channel_config();
2764         chan_config.manually_accept_inbound_channels = true;
2765         chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
2766
2767         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
2768         let nodes_0_deserialized;
2769
2770         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2771
2772         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
2773         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
2774
2775         let events = nodes[1].node.get_and_clear_pending_events();
2776         assert_eq!(events.len(), 1);
2777         match events[0] {
2778                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
2779                         if use_0conf {
2780                                 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2781                         } else {
2782                                 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2783                         }
2784                 },
2785                 _ => panic!("Unexpected event"),
2786         };
2787
2788         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
2789
2790         let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
2791
2792         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
2793         check_added_monitors!(nodes[0], 0);
2794
2795         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
2796         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
2797         check_added_monitors!(nodes[1], 1);
2798         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
2799
2800         let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
2801         assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
2802         match &bs_signed_locked[0] {
2803                 MessageSendEvent::SendFundingSigned { msg, .. } => {
2804                         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2805
2806                         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
2807                         check_added_monitors!(nodes[0], 1);
2808                 }
2809                 _ => panic!("Unexpected event"),
2810         }
2811         if use_0conf {
2812                 match &bs_signed_locked[1] {
2813                         MessageSendEvent::SendChannelReady { msg, .. } => {
2814                                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
2815                         }
2816                         _ => panic!("Unexpected event"),
2817                 }
2818         }
2819
2820         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
2821         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2822         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2823
2824         // nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
2825         // broadcast the funding transaction. If nodes[0] restarts at this point with the
2826         // ChannelMonitor lost, we should simply discard the channel.
2827
2828         // The test framework checks that watched_txn/outputs match the monitor set, which they will
2829         // not, so we have to clear them here.
2830         nodes[0].chain_source.watched_txn.lock().unwrap().clear();
2831         nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
2832
2833         reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
2834         check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
2835         assert!(nodes[0].node.list_channels().is_empty());
2836 }
2837
2838 #[test]
2839 fn test_outbound_reload_without_init_mon() {
2840         do_test_outbound_reload_without_init_mon(true);
2841         do_test_outbound_reload_without_init_mon(false);
2842 }
2843
2844 fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
2845         // Test that if the monitor update generated by funding_transaction_generated is stored async
2846         // and we restart with the latest ChannelManager but the ChannelMonitor persistence never
2847         // completed we happily drop the channel and move on.
2848         let chanmon_cfgs = create_chanmon_cfgs(2);
2849         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2850
2851         let persister;
2852         let new_chain_monitor;
2853
2854         let mut chan_config = test_default_channel_config();
2855         chan_config.manually_accept_inbound_channels = true;
2856         chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
2857
2858         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
2859         let nodes_1_deserialized;
2860
2861         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2862
2863         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
2864         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
2865
2866         let events = nodes[1].node.get_and_clear_pending_events();
2867         assert_eq!(events.len(), 1);
2868         match events[0] {
2869                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
2870                         if use_0conf {
2871                                 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2872                         } else {
2873                                 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2874                         }
2875                 },
2876                 _ => panic!("Unexpected event"),
2877         };
2878
2879         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
2880
2881         let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
2882
2883         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
2884         check_added_monitors!(nodes[0], 0);
2885
2886         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
2887         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2888         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
2889         check_added_monitors!(nodes[1], 1);
2890
2891         // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the
2892         // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
2893         // transaction is confirmed.
2894         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
2895
2896         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
2897         check_added_monitors!(nodes[0], 1);
2898         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
2899
2900         let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2901         if lock_commitment {
2902                 confirm_transaction(&nodes[0], &as_funding_tx[0]);
2903                 confirm_transaction(&nodes[1], &as_funding_tx[0]);
2904         }
2905         if use_0conf || lock_commitment {
2906                 let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
2907                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
2908         }
2909         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2910
2911         // nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
2912         // move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
2913         // restarts at this point with the ChannelMonitor lost, we should simply discard the channel.
2914
2915         // The test framework checks that watched_txn/outputs match the monitor set, which they will
2916         // not, so we have to clear them here.
2917         nodes[1].chain_source.watched_txn.lock().unwrap().clear();
2918         nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
2919
2920         reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
2921
2922         check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
2923         assert!(nodes[1].node.list_channels().is_empty());
2924 }
2925
2926 #[test]
2927 fn test_inbound_reload_without_init_mon() {
2928         do_test_inbound_reload_without_init_mon(true, true);
2929         do_test_inbound_reload_without_init_mon(true, false);
2930         do_test_inbound_reload_without_init_mon(false, true);
2931         do_test_inbound_reload_without_init_mon(false, false);
2932 }
2933
2934 #[test]
2935 fn test_blocked_chan_preimage_release() {
2936         // Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
2937         // be handled HTLC preimage `ChannelMonitorUpdate`s will still go out.
2938         let chanmon_cfgs = create_chanmon_cfgs(3);
2939         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2940         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2941         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2942
2943         create_announced_chan_between_nodes(&nodes, 0, 1);
2944         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2945
2946         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
2947
2948         // Tee up two payments in opposite directions across nodes[1], one it sent to generate a
2949         // PaymentSent event and one it forwards.
2950         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
2951         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
2952
2953         // Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
2954         nodes[2].node.claim_funds(payment_preimage_1);
2955         check_added_monitors(&nodes[2], 1);
2956         expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
2957
2958         let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2959         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
2960         do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
2961         check_added_monitors(&nodes[1], 0);
2962
2963         // Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
2964         // claim an HTLC on its channel with nodes[2], but that channel is blocked on the above
2965         // `PaymentSent` event.
2966         nodes[0].node.claim_funds(payment_preimage_2);
2967         check_added_monitors(&nodes[0], 1);
2968         expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);
2969
2970         let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2971         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
2972         check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
2973         assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
2974         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2975
2976         // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the
2977         // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the
2978         // channel.
2979         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
2980         check_added_monitors(&nodes[1], 1);
2981         let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
2982         assert!(a.is_none());
2983
2984         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
2985         check_added_monitors(&nodes[1], 0);
2986         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2987
2988         let events = nodes[1].node.get_and_clear_pending_events();
2989         assert_eq!(events.len(), 3);
2990         if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
2991         if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
2992         if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
2993
2994         // The event processing should release the last RAA updates on both channels.
2995         check_added_monitors(&nodes[1], 2);
2996
2997         // When we fetch the next update the message getter will generate the next update for nodes[2],
2998         // generating a further monitor update.
2999         let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
3000         check_added_monitors(&nodes[1], 1);
3001
3002         nodes[2].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
3003         do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
3004         expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
3005 }
3006
3007 fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
3008         // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
3009         // from the downstream channel, we immediately claim the HTLC on the upstream channel, before
3010         // even doing a `commitment_signed` dance on the downstream channel. This implies that our
3011         // `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
3012         // money, then we write the update that resolves the downstream node claiming their money. This
3013         // is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
3014         // generated, but of course this may not be the case. For asynchronous update writes, we have
3015         // to ensure monitor updates can block each other, preventing the inversion all together.
3016         let chanmon_cfgs = create_chanmon_cfgs(3);
3017         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3018
3019         let persister;
3020         let new_chain_monitor;
3021         let nodes_1_deserialized;
3022
3023         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3024         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3025
3026         let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
3027         let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
3028
3029         // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
3030         // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
3031         // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
3032         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
3033
3034         let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
3035         let mut manager_b = Vec::new();
3036         if !with_latest_manager {
3037                 manager_b = nodes[1].node.encode();
3038         }
3039
3040         nodes[2].node.claim_funds(payment_preimage);
3041         check_added_monitors(&nodes[2], 1);
3042         expect_payment_claimed!(nodes[2], payment_hash, 100_000);
3043
3044         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3045         let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
3046         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
3047
3048         // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
3049         // for it since the monitor update is marked in-progress.
3050         check_added_monitors(&nodes[1], 1);
3051         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3052
3053         // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
3054         // won't get the preimage when the nodes reconnect and we have to get it from the
3055         // ChannelMonitor.
3056         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
3057         check_added_monitors(&nodes[1], 1);
3058         if complete_bc_commitment_dance {
3059                 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
3060                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3061                 check_added_monitors(&nodes[2], 1);
3062                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3063                 check_added_monitors(&nodes[2], 1);
3064                 let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3065
3066                 // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
3067                 // preimage in the A <-> B channel, which will prevent it from persisting the
3068                 // `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage.
3069                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
3070                 check_added_monitors(&nodes[1], 0);
3071                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3072         }
3073
3074         // Now reload node B
3075         if with_latest_manager {
3076                 manager_b = nodes[1].node.encode();
3077         }
3078
3079         let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
3080         reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
3081
3082         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3083         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3084
3085         if with_latest_manager {
3086                 // If we used the latest ChannelManager to reload from, we should have both channels still
3087                 // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
3088                 // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
3089                 // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
3090                 // complete after reconnecting to our peers.
3091                 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3092                 nodes[1].node.timer_tick_occurred();
3093                 check_added_monitors(&nodes[1], 1);
3094                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3095
3096                 // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
3097                 // the end go ahead and do that, though the
3098                 // `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
3099                 // expect to *not* receive the final RAA ChannelMonitorUpdate.
3100                 if complete_bc_commitment_dance {
3101                         reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
3102                 } else {
3103                         let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
3104                         reconnect_args.pending_responding_commitment_signed.1 = true;
3105                         reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
3106                         reconnect_args.pending_raa = (false, true);
3107                         reconnect_nodes(reconnect_args);
3108                 }
3109
3110                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3111
3112                 // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
3113                 // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
3114                 // process.
3115                 let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
3116                 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
3117
3118                 // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
3119                 // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
3120                 // channel.
3121         } else {
3122                 // If the ChannelManager used in the reload was stale, check that the B <-> C channel was
3123                 // closed.
3124                 //
3125                 // Note that this will also process the ChannelMonitorUpdates which were queued up when we
3126                 // reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
3127                 // force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
3128                 // commitment update will be allowed to go out.
3129                 check_added_monitors(&nodes[1], 0);
3130                 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3131                 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3132                 check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
3133                 check_added_monitors(&nodes[1], 2);
3134
3135                 nodes[1].node.timer_tick_occurred();
3136                 check_added_monitors(&nodes[1], 0);
3137
3138                 // Don't bother to reconnect B to C - that channel has been closed. We don't need to
3139                 // exchange any messages here even though there's a pending commitment update because the
3140                 // ChannelMonitorUpdate hasn't yet completed.
3141                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3142
3143                 let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
3144                 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
3145
3146                 // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
3147                 // preimage (as it was a replay of the original ChannelMonitorUpdate from before we
3148                 // restarted). When we go to fetch the commitment transaction updates we'll poll the
3149                 // ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
3150                 // with the actual commitment transaction, which will allow us to fulfill the HTLC with
3151                 // node A.
3152         }
3153
3154         let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
3155         check_added_monitors(&nodes[1], 1);
3156
3157         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
3158         do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
3159
3160         expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);
3161
3162         // Finally, check that the payment was, ultimately, seen as sent by node A.
3163         expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
3164 }
3165
3166 #[test]
3167 fn test_inverted_mon_completion_order() {
3168         do_test_inverted_mon_completion_order(true, true);
3169         do_test_inverted_mon_completion_order(true, false);
3170         do_test_inverted_mon_completion_order(false, true);
3171         do_test_inverted_mon_completion_order(false, false);
3172 }
3173
3174 fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
3175         // Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
3176         // is force-closed between when we generate the update on reload and when we go to handle the
3177         // update or prior to generating the update at all.
3178
3179         if !close_chans_before_reload && close_only_a {
3180                 // If we're not closing, it makes no sense to "only close A"
3181                 panic!();
3182         }
3183
3184         let chanmon_cfgs = create_chanmon_cfgs(3);
3185         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3186
3187         let persister;
3188         let new_chain_monitor;
3189         let nodes_1_deserialized;
3190
3191         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3192         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3193
3194         let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
3195         let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
3196
3197         // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
3198         // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
3199         // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
3200         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
3201
3202         let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
3203
3204         nodes[2].node.claim_funds(payment_preimage);
3205         check_added_monitors(&nodes[2], 1);
3206         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
3207
3208         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3209         let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
3210         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
3211
3212         // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
3213         // for it since the monitor update is marked in-progress.
3214         check_added_monitors(&nodes[1], 1);
3215         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3216
3217         // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
3218         // the preimage when the nodes reconnect, at which point we have to ensure we get it from the
3219         // ChannelMonitor.
3220         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
3221         check_added_monitors(&nodes[1], 1);
3222         let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
3223
3224         let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
3225
3226         if close_chans_before_reload {
3227                 if !close_only_a {
3228                         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3229                         nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
3230                         check_closed_broadcast(&nodes[1], 1, true);
3231                         check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
3232                 }
3233
3234                 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3235                 nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
3236                 check_closed_broadcast(&nodes[1], 1, true);
3237                 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
3238         }
3239
3240         // Now reload node B
3241         let manager_b = nodes[1].node.encode();
3242         reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
3243
3244         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3245         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3246
3247         if close_chans_before_reload {
3248                 // If the channels were already closed, B will rebroadcast its closing transactions here.
3249                 let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3250                 if close_only_a {
3251                         assert_eq!(bs_close_txn.len(), 2);
3252                 } else {
3253                         assert_eq!(bs_close_txn.len(), 3);
3254                 }
3255         }
3256
3257         nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
3258         check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
3259         let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3260         assert_eq!(as_closing_tx.len(), 1);
3261
3262         // In order to give A's closing transaction to B without processing background events first,
3263         // use the _without_consistency_checks utility method. This is similar to connecting blocks
3264         // during startup prior to the node being full initialized.
3265         mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
3266
3267         // After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
3268         // ChannelMonitor (possible twice), even though the channel has since been closed.
3269         check_added_monitors(&nodes[1], 0);
3270         let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
3271         if hold_post_reload_mon_update {
3272                 for _ in 0..mons_added {
3273                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3274                 }
3275         }
3276         nodes[1].node.timer_tick_occurred();
3277         check_added_monitors(&nodes[1], mons_added);
3278
3279         // Finally, check that B created a payment preimage transaction and close out the payment.
3280         let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3281         assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
3282         let bs_preimage_tx = &bs_txn[0];
3283         check_spends!(bs_preimage_tx, as_closing_tx[0]);
3284
3285         if !close_chans_before_reload {
3286                 check_closed_broadcast(&nodes[1], 1, true);
3287                 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
3288         } else {
3289                 // While we forwarded the payment a while ago, we don't want to process events too early or
3290                 // we'll run background tasks we wanted to test individually.
3291                 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
3292         }
3293
3294         mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
3295         check_closed_broadcast(&nodes[0], 1, true);
3296         expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
3297
3298         if !close_chans_before_reload || close_only_a {
3299                 // Make sure the B<->C channel is still alive and well by sending a payment over it.
3300                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
3301                 reconnect_args.pending_responding_commitment_signed.1 = true;
3302                 if !close_chans_before_reload {
3303                         // TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
3304                         // will consider the forwarded payment complete and allow the B<->C
3305                         // `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
3306                         // be allowed, and needs fixing.
3307                         reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
3308                 }
3309                 reconnect_args.pending_raa.1 = true;
3310
3311                 reconnect_nodes(reconnect_args);
3312                 let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
3313                 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
3314                 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
3315                 if !close_chans_before_reload {
3316                         // Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
3317                         // channel will fly, removing the payment preimage from it.
3318                         check_added_monitors(&nodes[1], 1);
3319                 }
3320                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3321                 send_payment(&nodes[1], &[&nodes[2]], 100_000);
3322         }
3323 }
3324
3325 #[test]
3326 fn test_durable_preimages_on_closed_channel() {
3327         do_test_durable_preimages_on_closed_channel(true, true, true);
3328         do_test_durable_preimages_on_closed_channel(true, true, false);
3329         do_test_durable_preimages_on_closed_channel(true, false, true);
3330         do_test_durable_preimages_on_closed_channel(true, false, false);
3331         do_test_durable_preimages_on_closed_channel(false, false, true);
3332         do_test_durable_preimages_on_closed_channel(false, false, false);
3333 }
3334
3335 fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
3336         // Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
3337         // before restart we run the monitor update completion action on startup.
3338         let chanmon_cfgs = create_chanmon_cfgs(3);
3339         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3340
3341         let persister;
3342         let new_chain_monitor;
3343         let nodes_1_deserialized;
3344
3345         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3346         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3347
3348         let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
3349         let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
3350
3351         // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
3352         // `update_fulfill_htlc`+`commitment_signed` we have a monitor update for both of B's channels.
3353         // We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
3354         // update pending, then reload B. At that point, the final monitor update on the B<->C channel
3355         // is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
3356         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
3357
3358         nodes[2].node.claim_funds(payment_preimage);
3359         check_added_monitors(&nodes[2], 1);
3360         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
3361
3362         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3363         let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
3364         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
3365
3366         // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
3367         // for it since the monitor update is marked in-progress.
3368         check_added_monitors(&nodes[1], 1);
3369         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3370
3371         // Now step the Commitment Signed Dance between B and C and check that after the final RAA B
3372         // doesn't let the preimage-removing monitor update fly.
3373         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
3374         check_added_monitors(&nodes[1], 1);
3375         let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
3376
3377         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
3378         check_added_monitors(&nodes[2], 1);
3379         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
3380         check_added_monitors(&nodes[2], 1);
3381
3382         let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3383         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_final_raa);
3384         check_added_monitors(&nodes[1], 0);
3385
3386         // Finally, reload node B and check that after we call `process_pending_events` once we realize
3387         // we've completed the A<->B preimage-including monitor update and so can release the B<->C
3388         // preimage-removing monitor update.
3389         let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
3390         let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
3391         let manager_b = nodes[1].node.encode();
3392         reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
3393
3394         if close_during_reload {
3395                 // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
3396                 // (as learned about during the on-reload block connection).
3397                 nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
3398                 check_added_monitors!(nodes[0], 1);
3399                 check_closed_broadcast!(nodes[0], true);
3400                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
3401                 let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3402                 mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
3403         }
3404
3405         let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
3406         let mut events = nodes[1].node.get_and_clear_pending_events();
3407         assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
3408         expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
3409                 None, close_during_reload, false, false);
3410         if close_during_reload {
3411                 match events[0] {
3412                         Event::ChannelClosed { .. } => {},
3413                         _ => panic!(),
3414                 }
3415                 check_closed_broadcast!(nodes[1], true);
3416         }
3417
3418         // Once we run event processing the monitor should free, check that it was indeed the B<->C
3419         // channel which was updated.
3420         check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
3421         let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
3422         assert!(bc_update_id != post_ev_bc_update_id);
3423
3424         // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
3425         // fine.
3426         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3427         reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
3428         send_payment(&nodes[1], &[&nodes[2]], 100_000);
3429 }
3430
3431 #[test]
3432 fn test_reload_mon_update_completion_actions() {
3433         do_test_reload_mon_update_completion_actions(true);
3434         do_test_reload_mon_update_completion_actions(false);
3435 }
3436
3437 fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
3438         // Test that if a peer manages to send an `update_fulfill_htlc` message without a
3439         // `commitment_signed`, disconnects, then replays the `update_fulfill_htlc` message it doesn't
3440         // result in a channel hang. This was previously broken as the `DuplicateClaim` case wasn't
3441         // handled when claiming an HTLC and handling wasn't added when completion actions were added
3442         // (which must always complete at some point).
3443         let chanmon_cfgs = create_chanmon_cfgs(3);
3444         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3445
3446         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3447         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3448
3449         create_announced_chan_between_nodes(&nodes, 0, 1);
3450         create_announced_chan_between_nodes(&nodes, 1, 2);
3451
3452         // Route a payment from A, through B, to C, then claim it on C. Replay the
3453         // `update_fulfill_htlc` twice on B to check that B doesn't hang.
3454         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
3455
3456         nodes[2].node.claim_funds(payment_preimage);
3457         check_added_monitors(&nodes[2], 1);
3458         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
3459
3460         let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
3461         if hold_chan_a {
3462                 // The first update will be on the A <-> B channel, which we allow to complete.
3463                 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3464         }
3465         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
3466         check_added_monitors(&nodes[1], 1);
3467
3468         if !hold_chan_a {
3469                 let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
3470                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
3471                 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
3472                 expect_payment_sent!(&nodes[0], payment_preimage);
3473         }
3474
3475         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
3476         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3477
3478         let mut reconnect = ReconnectArgs::new(&nodes[1], &nodes[2]);
3479         reconnect.pending_htlc_claims = (1, 0);
3480         reconnect_nodes(reconnect);
3481
3482         if !hold_chan_a {
3483                 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
3484                 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
3485         } else {
3486                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3487                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3488
3489                 let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[1], nodes[2], 1_000_000);
3490
3491                 nodes[1].node.send_payment_with_route(&route, payment_hash_2,
3492                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
3493                 check_added_monitors(&nodes[1], 0);
3494
3495                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3496                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3497         }
3498 }
3499
3500 #[test]
3501 fn test_glacial_peer_cant_hang() {
3502         do_test_glacial_peer_cant_hang(false);
3503         do_test_glacial_peer_cant_hang(true);
3504 }