Store channels per peer
[rust-lightning] / lightning / src / ln / chanmon_update_fail_tests.rs
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone, so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.

use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::ln::channelmanager::{self, ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::TestBroadcaster;

use crate::ln::functional_test_utils::*;

use crate::util::test_utils;

use crate::io;
use bitcoin::hashes::Hash;
use bitcoin::TxMerkleNode;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};

#[test]
fn test_simple_monitor_permanent_update_fail() {
	// Test that we handle a simple permanent monitor update failure
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), true, APIError::ChannelUnavailable {..}, {});
	check_added_monitors!(nodes[0], 2);

	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 2);
	match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	};
	match events_1[1] {
		MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
		_ => panic!("Unexpected event"),
	};

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn test_monitor_and_persister_update_fail() {
	// Test that if both updating the `ChannelMonitor` and persisting the updated
	// `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
	// is the one that gets returned.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channel
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };

	// Rebalance the network to generate an HTLC in each direction
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);

	// Route an HTLC from node 0 to node 1 (but don't settle)
	let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);

	// Make a copy of the ChainMonitor so we can capture the error it returns on a
	// bogus update. Note that if instead we updated nodes[0]'s ChainMonitor
	// directly, the node would fail to be `Drop`'d at the end because its
	// ChannelManager and ChainMonitor would be out of sync.
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
	let persister = test_utils::TestPersister::new();
	let tx_broadcaster = TestBroadcaster {
		txn_broadcasted: Mutex::new(Vec::new()),
		// Because we will connect a block at height 200 below, we need the TestBroadcaster to know
		// that we are at height 200 so that it doesn't think we're violating the time lock
		// requirements of transactions broadcasted at that point.
		blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
	};
	let chain_mon = {
		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
		let mut w = test_utils::TestVecWriter(Vec::new());
		monitor.write(&mut w).unwrap();
		let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
			&mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1;
		assert!(new_monitor == *monitor);
		let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
		chain_mon
	};
	let header = BlockHeader {
		version: 0x20000000,
		prev_blockhash: BlockHash::all_zeros(),
		merkle_root: TxMerkleNode::all_zeros(),
		time: 42,
		bits: 42,
		nonce: 42
	};
	chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);

	// Set the persister's return value to be an `InProgress`.
	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	// Try to update ChannelMonitor
	nodes[1].node.claim_funds(preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
	check_added_monitors!(nodes[1], 1);

	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	{
		let mut node_0_per_peer_lock;
		let mut node_0_peer_state_lock;
		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
		if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
			// Check that even though the persister is returning an `InProgress`,
			// because the update is bogus, ultimately the error that's returned
			// should be a PermanentFailure.
			if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
			logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
		} else { assert!(false); }
	}

	check_added_monitors!(nodes[0], 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
}

fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
	// Test that we can recover from a simple temporary monitor update failure, optionally with
	// a disconnect in between
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	{
		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let payment_event = SendEvent::from_event(events_2.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_3 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
			assert_eq!(payment_hash_1, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_1, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);

	// Now set it to failed again...
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	{
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	}

	// ...and make sure we can force-close a frozen channel
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_broadcast!(nodes[0], true);

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
}

#[test]
fn test_simple_monitor_temporary_update_fail() {
	do_test_simple_monitor_temporary_update_fail(false);
	do_test_simple_monitor_temporary_update_fail(true);
}

fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
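	// Per the walkthrough comment below: bits 8 and 16 of `disconnect_count` tweak message
	// ordering and fulfill-delivery timing, while the remaining low bits count forced
	// disconnect/reconnect cycles, hence the `& !disconnect_flags` masks throughout.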
	let disconnect_flags = 8 | 16;

	// Test that we can recover from a temporary monitor update failure with some in-flight
	// HTLCs going on at the same time, potentially with some disconnection thrown in.
	// * First we route a payment, then get a temporary monitor update failure when trying to
	//   route a second payment. We then claim the first payment.
	// * If disconnect_count is set, we will disconnect at this point (which is likely, as
	//   InProgress likely indicates a network disconnect which resulted in failing to update
	//   the ChannelMonitor on a watchtower).
	// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
	//   immediately, otherwise we wait for the disconnect and deliver them via the reconnect
	//   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
	//   disconnect_count & !disconnect_flags is 0).
	// * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
	//   through message sending, potentially disconnecting/reconnecting multiple times based on
	//   disconnect_count, to get the update_fulfill_htlc through.
	// * We then walk through more message exchanges to get the original update_add_htlc
	//   through, swapping message ordering based on disconnect_count & 8 and optionally
	//   disconnecting/reconnecting based on disconnect_count.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Now try to send a second payment which will fail to send
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	// Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1],
	// but nodes[0] won't respond since it is frozen.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());

			if (disconnect_count & 16) == 0 {
				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
				let events_3 = nodes[0].node.get_and_clear_pending_events();
				assert_eq!(events_3.len(), 1);
				match events_3[0] {
					Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
						assert_eq!(*payment_preimage, payment_preimage_1);
						assert_eq!(*payment_hash, payment_hash_1);
					},
					_ => panic!("Unexpected event"),
				}

				nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
				check_added_monitors!(nodes[0], 1);
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			}

			(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};

	if disconnect_count & !disconnect_flags > 0 {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	}

	// Now fix monitor updating...
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

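	// Helper to disconnect and immediately reconnect the peers, returning each side's
	// channel_reestablish and the responses each node generates to the other's reestablish.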
	macro_rules! disconnect_reconnect_peers { () => { {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		(reestablish_1, reestablish_2, as_resp, bs_resp)
	} } }

	let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		check_added_monitors!(nodes[0], 0);
		let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		check_added_monitors!(nodes[1], 0);
		let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		assert!(bs_resp.1.is_none());
		if (disconnect_count & 16) == 0 {
			assert!(bs_resp.2.is_none());

			assert!(as_resp.1.is_some());
			assert!(as_resp.2.is_some());
			assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
		} else {
			assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
			assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
			assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);

			assert!(as_resp.1.is_none());

			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
			let events_3 = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events_3.len(), 1);
			match events_3[0] {
				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
					assert_eq!(*payment_preimage, payment_preimage_1);
					assert_eq!(*payment_hash, payment_hash_1);
				},
				_ => panic!("Unexpected event"),
			}

			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
			let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			// No commitment_signed so get_event_msg's assert(len == 1) passes
			check_added_monitors!(nodes[0], 1);

			as_resp.1 = Some(as_resp_raa);
			bs_resp.2 = None;
		}

		if disconnect_count & !disconnect_flags > 1 {
			let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();

			if (disconnect_count & 16) == 0 {
				assert!(reestablish_1 == second_reestablish_1);
				assert!(reestablish_2 == second_reestablish_2);
			}
			assert!(as_resp == second_as_resp);
			assert!(bs_resp == second_bs_resp);
		}

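		// The reestablish responses carry the queued payment: as_resp.2 holds the
		// update_add_htlc/commitment_signed and as_resp.1 the revoke_and_ack.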
		(SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
	} else {
		let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events_4.len(), 2);
		(SendEvent::from_event(events_4.remove(0)), match events_4[0] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		})
	};

	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	if disconnect_count & !disconnect_flags > 2 {
		let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

		assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
		assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

		assert!(as_resp.2.is_none());
		assert!(bs_resp.2.is_none());
	}

	let as_commitment_update;
	let bs_second_commitment_update;

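	// Delivering each RAA frees a bare commitment_signed on the receiving node; the two macros
	// below let us run those deliveries in either order (selected via disconnect_count & 8).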
	macro_rules! handle_bs_raa { () => {
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
		as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		assert!(as_commitment_update.update_add_htlcs.is_empty());
		assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(as_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[0], 1);
	} }

	macro_rules! handle_initial_raa { () => {
		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
		bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[1], 1);
	} }

	if (disconnect_count & 8) == 0 {
		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.is_none());

			assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	} else {
		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

			assert!(as_resp.2.is_none());
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);

			assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	}

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_5 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 1);
	match events_5[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
			assert_eq!(payment_hash_2, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_temporary_update_fail_a() {
	do_test_monitor_temporary_update_fail(0);
	do_test_monitor_temporary_update_fail(1);
	do_test_monitor_temporary_update_fail(2);
	do_test_monitor_temporary_update_fail(3);
	do_test_monitor_temporary_update_fail(4);
	do_test_monitor_temporary_update_fail(5);
}

#[test]
fn test_monitor_temporary_update_fail_b() {
	do_test_monitor_temporary_update_fail(2 | 8);
	do_test_monitor_temporary_update_fail(3 | 8);
	do_test_monitor_temporary_update_fail(4 | 8);
	do_test_monitor_temporary_update_fail(5 | 8);
}

#[test]
fn test_monitor_temporary_update_fail_c() {
	do_test_monitor_temporary_update_fail(1 | 16);
	do_test_monitor_temporary_update_fail(2 | 16);
	do_test_monitor_temporary_update_fail(3 | 16);
	do_test_monitor_temporary_update_fail(2 | 8 | 16);
	do_test_monitor_temporary_update_fail(3 | 8 | 16);
}

#[test]
fn test_monitor_update_fail_cs() {
	// Tests handling of a monitor update failure when processing an incoming commitment_signed
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

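	// Fail the monitor update on the incoming commitment_signed; nodes[1] should hold back its
	// revoke_and_ack/commitment_signed responses until the update completes below.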
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let responses = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(responses.len(), 2);

	match responses[0] {
		MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}
	match responses[1] {
		MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert!(updates.update_fail_htlcs.is_empty());
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());

			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(nodes[0], 1);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		},
		_ => panic!("Unexpected event"),
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
			assert_eq!(payment_hash, our_payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}

#[test]
fn test_monitor_update_fail_no_rebroadcast() {
	// Tests handling of a monitor update failure when no message rebroadcasting on
	// channel_monitor_updated() is required. Backported from chanmon_fail_consistency
	// fuzz tests.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment(&route, our_payment_hash, &Some(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

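	// Fail the monitor update on nodes[1]'s receipt of the RAA; once it completes, no messages
	// should need to be re-sent, only the pending HTLC forward processed.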
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn test_monitor_update_raa_while_paused() {
	// Tests handling of an RAA while monitor updating has already been marked failed.
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	send_payment(&nodes[0], &[&nodes[1]], 5000000);
	let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment(&route, our_payment_hash_1, &Some(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

	let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
	{
		nodes[1].node.send_payment(&route, our_payment_hash_2, &Some(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[1], 1);
	}
	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

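	// Two InProgress results are set since both the commitment_signed and the RAA handling below
	// each trigger their own monitor update.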
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
	check_added_monitors!(nodes[1], 1);
	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[0], 1);
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
}

fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
	// Tests handling of a monitor update failure when processing an incoming RAA
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	// Rebalance a bit so that we can send backwards from 2 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	// Route a first payment that we'll fail backwards
	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
	nodes[2].node.fail_htlc_backwards(&payment_hash_1);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
	check_added_monitors!(nodes[2], 1);

	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);

	let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
	check_added_monitors!(nodes[0], 0);

	// While the second channel is AwaitingRAA, forward a second payment to get it into the
	// holding cell.
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	{
		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now fail monitor updating.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Forward a third payment which will also be added to the holding cell, despite the channel
	// being paused waiting on a monitor update.
	let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	{
		nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
	send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
	check_added_monitors!(nodes[1], 0);

	// Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
	// and not forwarded.
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
		let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
		nodes[2].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
		check_added_monitors!(nodes[2], 1);

		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
		check_added_monitors!(nodes[1], 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		(Some(payment_preimage_4), Some(payment_hash_4))
	} else { (None, None) };

920         // Restore monitor updating, ensuring we immediately get a fail-back update and an
921         // update_add update.
922         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
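        // The test chain monitor records the latest (outpoint, update_id) pair per channel;
        // passing that pair to force_channel_monitor_updated signals that the in-flight
        // persistence has completed, letting the ChannelManager resume the paused channel.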
923         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
924         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
925         check_added_monitors!(nodes[1], 0);
926         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
927         check_added_monitors!(nodes[1], 1);
928
929         let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
930         if test_ignore_second_cs {
931                 assert_eq!(events_3.len(), 3);
932         } else {
933                 assert_eq!(events_3.len(), 2);
934         }
935
936         // Note that the ordering of the events for different nodes is non-prescriptive, though the
937         // two events that both go to nodes[2] have to stay in their relative order.
938         let messages_a = match events_3.pop().unwrap() {
939                 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
940                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
941                         assert!(updates.update_fulfill_htlcs.is_empty());
942                         assert_eq!(updates.update_fail_htlcs.len(), 1);
943                         assert!(updates.update_fail_malformed_htlcs.is_empty());
944                         assert!(updates.update_add_htlcs.is_empty());
945                         assert!(updates.update_fee.is_none());
946                         (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
947                 },
948                 _ => panic!("Unexpected event type!"),
949         };
950         let raa = if test_ignore_second_cs {
951                 match events_3.remove(1) {
952                         MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
953                                 assert_eq!(node_id, nodes[2].node.get_our_node_id());
954                                 Some(msg.clone())
955                         },
956                         _ => panic!("Unexpected event"),
957                 }
958         } else { None };
959         let send_event_b = SendEvent::from_event(events_3.remove(0));
960         assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
961
962         // Now deliver the new messages...
963
964         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
965         commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
966         expect_payment_failed!(nodes[0], payment_hash_1, true);
967
968         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
969         let as_cs;
970         if test_ignore_second_cs {
971                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
972                 check_added_monitors!(nodes[2], 1);
973                 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
974                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
975                 check_added_monitors!(nodes[2], 1);
976                 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
977                 assert!(bs_cs.update_add_htlcs.is_empty());
978                 assert!(bs_cs.update_fail_htlcs.is_empty());
979                 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
980                 assert!(bs_cs.update_fulfill_htlcs.is_empty());
981                 assert!(bs_cs.update_fee.is_none());
982
983                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
984                 check_added_monitors!(nodes[1], 1);
985                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
986
987                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
988                 check_added_monitors!(nodes[1], 1);
989         } else {
990                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
991                 check_added_monitors!(nodes[2], 1);
992
993                 let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
994                 assert_eq!(bs_revoke_and_commit.len(), 2);
995                 match bs_revoke_and_commit[0] {
996                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
997                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
998                                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
999                                 check_added_monitors!(nodes[1], 1);
1000                         },
1001                         _ => panic!("Unexpected event"),
1002                 }
1003
1004                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1005
1006                 match bs_revoke_and_commit[1] {
1007                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1008                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1009                                 assert!(updates.update_add_htlcs.is_empty());
1010                                 assert!(updates.update_fail_htlcs.is_empty());
1011                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
1012                                 assert!(updates.update_fulfill_htlcs.is_empty());
1013                                 assert!(updates.update_fee.is_none());
1014                                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1015                                 check_added_monitors!(nodes[1], 1);
1016                         },
1017                         _ => panic!("Unexpected event"),
1018                 }
1019         }
1020
1021         assert_eq!(as_cs.update_add_htlcs.len(), 1);
1022         assert!(as_cs.update_fail_htlcs.is_empty());
1023         assert!(as_cs.update_fail_malformed_htlcs.is_empty());
1024         assert!(as_cs.update_fulfill_htlcs.is_empty());
1025         assert!(as_cs.update_fee.is_none());
1026         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1027
1028
1029         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
1030         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
1031         check_added_monitors!(nodes[2], 1);
1032         let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1033
1034         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1035         check_added_monitors!(nodes[2], 1);
1036         let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1037
1038         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
1039         check_added_monitors!(nodes[1], 1);
1040         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1041
1042         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1043         check_added_monitors!(nodes[1], 1);
1044         let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1045
1046         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1047         check_added_monitors!(nodes[2], 1);
1048         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1049
1050         expect_pending_htlcs_forwardable!(nodes[2]);
1051
1052         let events_6 = nodes[2].node.get_and_clear_pending_events();
1053         assert_eq!(events_6.len(), 2);
1054         match events_6[0] {
1055                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
1056                 _ => panic!("Unexpected event"),
1057         };
1058         match events_6[1] {
1059                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1060                 _ => panic!("Unexpected event"),
1061         };
1062
1063         if test_ignore_second_cs {
1064                 expect_pending_htlcs_forwardable!(nodes[1]);
1065                 check_added_monitors!(nodes[1], 1);
1066
1067                 send_event = SendEvent::from_node(&nodes[1]);
1068                 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
1069                 assert_eq!(send_event.msgs.len(), 1);
1070                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
1071                 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
1072
1073                 expect_pending_htlcs_forwardable!(nodes[0]);
1074
1075                 let events_9 = nodes[0].node.get_and_clear_pending_events();
1076                 assert_eq!(events_9.len(), 1);
1077                 match events_9[0] {
1078                         Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
1079                         _ => panic!("Unexpected event"),
1080                 };
1081                 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
1082         }
1083
1084         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
1085 }
1086
1087 #[test]
1088 fn test_monitor_update_fail_raa() {
1089         do_test_monitor_update_fail_raa(false);
1090         do_test_monitor_update_fail_raa(true);
1091 }
1092
1093 #[test]
1094 fn test_monitor_update_fail_reestablish() {
1095         // Simple test for message retransmission after a monitor update failure when handling a
1096         // channel_reestablish generates a monitor update (which comes from freeing holding cell
1097         // HTLCs).
1098         let chanmon_cfgs = create_chanmon_cfgs(3);
1099         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1100         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1101         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1102         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1103         create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1104
1105         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
1106
1107         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1108         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1109
1110         nodes[2].node.claim_funds(payment_preimage);
1111         check_added_monitors!(nodes[2], 1);
1112         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
1113
1114         let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1115         assert!(updates.update_add_htlcs.is_empty());
1116         assert!(updates.update_fail_htlcs.is_empty());
1117         assert!(updates.update_fail_malformed_htlcs.is_empty());
1118         assert!(updates.update_fee.is_none());
1119         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1120         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1121         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
1122         check_added_monitors!(nodes[1], 1);
1123         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1124         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1125
1126         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
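        // On reconnection, nodes[1]'s channel_reestablish handling will try to free the
        // update_fulfill sitting in its holding cell; that monitor update is marked in-progress,
        // so the fulfill stays held until the update is completed below.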
1127         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1128         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1129
1130         let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1131         let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1132
1133         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1134
1135         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1136         assert_eq!(
1137                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1138                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1139
1140         nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
1141         check_added_monitors!(nodes[1], 1);
1142
1143         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1144         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1145
1146         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1147         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1148
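        // Since no new commitment state was reached while disconnected, the retransmitted
        // channel_reestablish messages must be identical to the first round.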
1149         assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
1150         assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
1151
1152         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1153         assert_eq!(
1154                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1155                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1156
1157         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1158         check_added_monitors!(nodes[1], 0);
1159         assert_eq!(
1160                 get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
1161                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1162
1163         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1164         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1165         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1166         check_added_monitors!(nodes[1], 0);
1167
1168         updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1169         assert!(updates.update_add_htlcs.is_empty());
1170         assert!(updates.update_fail_htlcs.is_empty());
1171         assert!(updates.update_fail_malformed_htlcs.is_empty());
1172         assert!(updates.update_fee.is_none());
1173         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1174         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1175         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1176         expect_payment_sent!(nodes[0], payment_preimage);
1177 }
1178
1179 #[test]
1180 fn raa_no_response_awaiting_raa_state() {
1181         // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1182         // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1183         // in question (assuming it intends to respond with a CS after monitor updating is restored).
1184         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1185         let chanmon_cfgs = create_chanmon_cfgs(2);
1186         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1187         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1188         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1189         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1190
1191         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1192         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1193         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
1194
1195         // Queue up two payments - one will be delivered right away, one immediately goes into the
1196         // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1197         // immediately after a CS. By failing the monitor update triggered by the CS (which
1198         // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1199         // generation during RAA while in monitor-update-failed state.
1200         {
1201                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1202                 check_added_monitors!(nodes[0], 1);
1203                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1204                 check_added_monitors!(nodes[0], 0);
1205         }
1206
1207         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1208         assert_eq!(events.len(), 1);
1209         let payment_event = SendEvent::from_event(events.pop().unwrap());
1210         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1211         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1212         check_added_monitors!(nodes[1], 1);
1213
1214         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1215         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1216         check_added_monitors!(nodes[0], 1);
1217         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1218         assert_eq!(events.len(), 1);
1219         let payment_event = SendEvent::from_event(events.pop().unwrap());
1220
1221         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1222         check_added_monitors!(nodes[0], 1);
1223         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1224
1225         // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1226         // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1227         // then restore channel monitor updates.
1228         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1229         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
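        // Two statuses are queued because two monitor updates follow: one triggered by the
        // commitment_signed below and one by the subsequent revoke_and_ack.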
1230         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1231         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1232         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1233         check_added_monitors!(nodes[1], 1);
1234         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1235
1236         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1237         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1238         check_added_monitors!(nodes[1], 1);
1239
1240         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1241         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1242         // nodes[1] should be AwaitingRAA here!
1243         check_added_monitors!(nodes[1], 0);
1244         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1245         expect_pending_htlcs_forwardable!(nodes[1]);
1246         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1247
1248         // We send a third payment here, which is somewhat of a redundant test, but the
1249         // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1250         // commitment transaction states) whereas here we can explicitly check for it.
1251         {
1252                 nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1253                 check_added_monitors!(nodes[0], 0);
1254                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1255         }
1256         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1257         check_added_monitors!(nodes[0], 1);
1258         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1259         assert_eq!(events.len(), 1);
1260         let payment_event = SendEvent::from_event(events.pop().unwrap());
1261
1262         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1263         check_added_monitors!(nodes[0], 1);
1264         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1265
1266         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1267         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1268         check_added_monitors!(nodes[1], 1);
1269         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1270
1271         // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1272         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1273         check_added_monitors!(nodes[1], 1);
1274         expect_pending_htlcs_forwardable!(nodes[1]);
1275         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1276         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1277
1278         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1279         check_added_monitors!(nodes[0], 1);
1280
1281         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1282         check_added_monitors!(nodes[0], 1);
1283         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1284
1285         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1286         check_added_monitors!(nodes[1], 1);
1287         expect_pending_htlcs_forwardable!(nodes[1]);
1288         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
1289
1290         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1291         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1292         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
1293 }
1294
1295 #[test]
1296 fn claim_while_disconnected_monitor_update_fail() {
1297         // Test for claiming a payment while disconnected and then having the resulting
1298         // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1299         // contrived case for nodes with network instability.
1300         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1301         // code introduced a regression in this test (specifically, this caught a removal of the
1302         // channel_reestablish handling ensuring the order made sense given the messages used).
1303         let chanmon_cfgs = create_chanmon_cfgs(2);
1304         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1305         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1306         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1307         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1308
1309         // Forward a payment for B to claim
1310         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1311
1312         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1313         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1314
1315         nodes[1].node.claim_funds(payment_preimage_1);
1316         check_added_monitors!(nodes[1], 1);
1317         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1318
1319         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1320         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1321
1322         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1323         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1324
1325         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1326         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1327
1328         // Now deliver A's reestablish, freeing the claim from the holding cell, but fail the monitor
1329         // update.
1330         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1331
1332         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1333         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1334         check_added_monitors!(nodes[1], 1);
1335         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1336
1337         // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1338         // the monitor still failed
1339         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1340         {
1341                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1342                 check_added_monitors!(nodes[0], 1);
1343         }
1344
1345         let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1346         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1347         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1348         check_added_monitors!(nodes[1], 1);
1349         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1350         // Note that nodes[1] not updating the monitor here is OK - it won't take action on the new
1351         // HTLC until the monitor update completes and it has updated for the new commitment transaction.
1352
1353         // Now un-fail the monitor, which will result in B sending its original commitment update,
1354         // receiving the commitment update from A, and the resulting commitment dances.
1355         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1356         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1357         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1358         check_added_monitors!(nodes[1], 0);
1359
1360         let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1361         assert_eq!(bs_msgs.len(), 2);
1362
1363         match bs_msgs[0] {
1364                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1365                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1366                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1367                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1368                         check_added_monitors!(nodes[0], 1);
1369
1370                         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1371                         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1372                         check_added_monitors!(nodes[1], 1);
1373                 },
1374                 _ => panic!("Unexpected event"),
1375         }
1376
1377         match bs_msgs[1] {
1378                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1379                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1380                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1381                         check_added_monitors!(nodes[0], 1);
1382                 },
1383                 _ => panic!("Unexpected event"),
1384         }
1385
1386         let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1387
1388         let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1389         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1390         check_added_monitors!(nodes[0], 1);
1391         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1392
1393         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1394         check_added_monitors!(nodes[1], 1);
1395         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1396         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1397         check_added_monitors!(nodes[1], 1);
1398
1399         expect_pending_htlcs_forwardable!(nodes[1]);
1400         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1401
1402         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1403         check_added_monitors!(nodes[0], 1);
1404         expect_payment_sent!(nodes[0], payment_preimage_1);
1405
1406         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1407 }
1408
1409 #[test]
1410 fn monitor_failed_no_reestablish_response() {
1411         // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1412         // response to a commitment_signed.
1413         // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1414         // debug_assert!() failure in channel_reestablish handling.
1415         let chanmon_cfgs = create_chanmon_cfgs(2);
1416         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1417         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1418         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1419         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
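        // Mark announcement_signatures as already exchanged on both sides so that the
        // reconnection below shouldn't generate announcement_signatures messages, which would
        // otherwise complicate the expected message sets.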
1420         {
1421                 let mut node_0_per_peer_lock;
1422                 let mut node_0_peer_state_lock;
1423                 let mut node_1_per_peer_lock;
1424                 let mut node_1_peer_state_lock;
1425                 get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1426                 get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1427         }
1428
1429         // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1430         // on receipt).
1431         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1432         {
1433                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1434                 check_added_monitors!(nodes[0], 1);
1435         }
1436
1437         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
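        // nodes[1] will receive the commitment_signed but, with its monitor update in flight,
        // produce no revoke_and_ack; the reestablish exchange below must likewise produce no
        // duplicate responses until the update completes.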
1438         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1439         assert_eq!(events.len(), 1);
1440         let payment_event = SendEvent::from_event(events.pop().unwrap());
1441         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1442         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1443         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1444         check_added_monitors!(nodes[1], 1);
1445
1446         // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1447         // is still failing to update monitors.
1448         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1449         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1450
1451         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1452         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
1453
1454         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1455         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1456
1457         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1458         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1459         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1460         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1461
1462         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1463         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1464         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1465         check_added_monitors!(nodes[1], 0);
1466         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1467
1468         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1469         check_added_monitors!(nodes[0], 1);
1470         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1471         check_added_monitors!(nodes[0], 1);
1472
1473         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1474         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1475         check_added_monitors!(nodes[1], 1);
1476
1477         expect_pending_htlcs_forwardable!(nodes[1]);
1478         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1479
1480         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1481 }
1482
1483 #[test]
1484 fn first_message_on_recv_ordering() {
1485         // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1486         // messages, we're willing to flip the order of response messages if necessary in response to
1487         // a commitment_signed which needs to send an RAA first.
1488         // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1489         // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1490         // response. To do this, we start routing two payments, with the final RAA for the first being
1491         // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1492         // have no pending response but will want to send an RAA/CS (with the updates for the second
1493         // payment applied).
1494         // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1495         let chanmon_cfgs = create_chanmon_cfgs(2);
1496         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1497         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1498         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1499         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1500
1501         // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1502         // can deliver it and fail the monitor update.
1503         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1504         {
1505                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1506                 check_added_monitors!(nodes[0], 1);
1507         }
1508
1509         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1510         assert_eq!(events.len(), 1);
1511         let payment_event = SendEvent::from_event(events.pop().unwrap());
1512         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1513         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1514         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1515         check_added_monitors!(nodes[1], 1);
1516         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1517
1518         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1519         check_added_monitors!(nodes[0], 1);
1520         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1521         check_added_monitors!(nodes[0], 1);
1522
1523         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1524
1525         // Route the second payment, generating an update_add_htlc/commitment_signed
1526         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1527         {
1528                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1529                 check_added_monitors!(nodes[0], 1);
1530         }
1531         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1532         assert_eq!(events.len(), 1);
1533         let payment_event = SendEvent::from_event(events.pop().unwrap());
1534         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1535
1536         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1537
1538         // Deliver the final RAA for the first payment, which does not require a response. RAAs
1539         // generally require a commitment_signed, so the fact that we're expecting an opposite response
1540         // to the next message also tests resetting the delivery order.
1541         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1542         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1543         check_added_monitors!(nodes[1], 1);
1544
1545         // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1546         // RAA/CS response, which should be generated when the monitor update completes (with the
1547         // appropriate HTLC acceptance).
1548         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1549         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1550         check_added_monitors!(nodes[1], 1);
1551         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1552
1553         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1554         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1555         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1556         check_added_monitors!(nodes[1], 0);
1557
1558         expect_pending_htlcs_forwardable!(nodes[1]);
1559         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1560
1561         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1562         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1563         check_added_monitors!(nodes[0], 1);
1564         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1565         check_added_monitors!(nodes[0], 1);
1566
1567         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1568         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1569         check_added_monitors!(nodes[1], 1);
1570
1571         expect_pending_htlcs_forwardable!(nodes[1]);
1572         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1573
1574         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1575         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1576 }
1577
1578 #[test]
1579 fn test_monitor_update_fail_claim() {
1580         // Basic test for monitor update failures when processing claim_funds calls.
1581         // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1582         // update to claim the payment. We then send two payments C->B->A, which are held at B.
1583         // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1584         // the payments from C onwards to A.
1585         let chanmon_cfgs = create_chanmon_cfgs(3);
1586         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1587         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1588         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1589         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1590         create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1591
1592         // Rebalance a bit so that we can send backwards from nodes[2] to nodes[1].
1593         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1594
1595         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1596
1597         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
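        // claim_funds triggers a monitor update to persist the preimage; with that update held
        // in-progress, the update_fulfill for A stays pending and no message events are generated.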
1598         nodes[1].node.claim_funds(payment_preimage_1);
1599         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1600         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1601         check_added_monitors!(nodes[1], 1);
1602
1603         // Note that at this point there is a pending commitment transaction update for A being held by
1604         // B. Even when we go to send the payment from C through B to A, B will not update this
1605         // already-signed commitment transaction and will instead wait for it to resolve before
1606         // forwarding the payment onwards.
1607
1608         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
1609         {
1610                 nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1611                 check_added_monitors!(nodes[2], 1);
1612         }
1613
1614         // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1615         // paused, so the forward shouldn't succeed until we call force_channel_monitor_updated().
1616         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1617
1618         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1619         assert_eq!(events.len(), 1);
1620         let payment_event = SendEvent::from_event(events.pop().unwrap());
1621         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1622         let events = nodes[1].node.get_and_clear_pending_msg_events();
1623         assert_eq!(events.len(), 0);
1624         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1625
1626         let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1627         nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1628         check_added_monitors!(nodes[2], 1);
1629
1630         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1631         assert_eq!(events.len(), 1);
1632         let payment_event = SendEvent::from_event(events.pop().unwrap());
1633         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1634         let events = nodes[1].node.get_and_clear_pending_msg_events();
1635         assert_eq!(events.len(), 0);
1636         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1637
1638         // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1639         let channel_id = chan_1.2;
1640         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1641         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1642         check_added_monitors!(nodes[1], 0);
1643
1644         let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1645         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1646         commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1647         expect_payment_sent!(nodes[0], payment_preimage_1);
1648
1649         // Get the payments forwarded; note that they were batched into one commitment update.
1650         expect_pending_htlcs_forwardable!(nodes[1]);
1651         check_added_monitors!(nodes[1], 1);
1652         let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1653         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1654         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1655         commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1656         expect_pending_htlcs_forwardable!(nodes[0]);
1657
1658         let events = nodes[0].node.get_and_clear_pending_events();
1659         assert_eq!(events.len(), 2);
1660         match events[0] {
1661                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id } => {
1662                         assert_eq!(payment_hash_2, *payment_hash);
1663                         assert_eq!(1_000_000, amount_msat);
1664                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1665                         assert_eq!(via_channel_id, Some(channel_id));
1666                         assert_eq!(via_user_channel_id, Some(42));
1667                         match &purpose {
1668                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1669                                         assert!(payment_preimage.is_none());
1670                                         assert_eq!(payment_secret_2, *payment_secret);
1671                                 },
1672                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1673                         }
1674                 },
1675                 _ => panic!("Unexpected event"),
1676         }
1677         match events[1] {
1678                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
1679                         assert_eq!(payment_hash_3, *payment_hash);
1680                         assert_eq!(1_000_000, amount_msat);
1681                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1682                         assert_eq!(via_channel_id, Some(channel_id));
1683                         match &purpose {
1684                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1685                                         assert!(payment_preimage.is_none());
1686                                         assert_eq!(payment_secret_3, *payment_secret);
1687                                 },
1688                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1689                         }
1690                 },
1691                 _ => panic!("Unexpected event"),
1692         }
1693 }
1694
1695 #[test]
1696 fn test_monitor_update_on_pending_forwards() {
1697         // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1698         // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1699         // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1700         // from C to A will be pending a forward to A.
1701         let chanmon_cfgs = create_chanmon_cfgs(3);
1702         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1703         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1704         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1705         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1706         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1707
1708         // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
1709         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1710
1711         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1712         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
1713         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
1714         check_added_monitors!(nodes[2], 1);
1715
1716         let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1717         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1718         commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1719         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1720
1721         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
1722         {
1723                 nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1724                 check_added_monitors!(nodes[2], 1);
1725         }
1726
1727         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1728         assert_eq!(events.len(), 1);
1729         let payment_event = SendEvent::from_event(events.pop().unwrap());
1730         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1731         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
1732
1733         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
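        // Processing the pending HTLCs produces a single monitor update on chan_1 covering both
        // the back-fail of the first payment and the forwarded add of the second; holding it
        // keeps both pending until the monitor is restored below.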
1734         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1735         check_added_monitors!(nodes[1], 1);
1736         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1737
1738         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1739         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1740         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1741         check_added_monitors!(nodes[1], 0);
1742
1743         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1744         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1745         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1746         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1747
1748         let events = nodes[0].node.get_and_clear_pending_events();
1749         assert_eq!(events.len(), 2);
1750         if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[0] {
1751                 assert_eq!(payment_hash, payment_hash_1);
1752                 assert!(payment_failed_permanently);
1753         } else { panic!("Unexpected event!"); }
1754         match events[1] {
1755                 Event::PendingHTLCsForwardable { .. } => { },
1756                 _ => panic!("Unexpected event"),
1757         };
1758         nodes[0].node.process_pending_htlc_forwards();
1759         expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1760
1761         claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
1762 }
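
// A minimal sketch, not part of the upstream suite (the test name is illustrative): it
// distills the InProgress-then-complete claim flow used throughout this file, without the
// AwaitingRAA wrinkle exercised by monitor_update_claim_fail_no_response below. Every call
// here appears verbatim in the surrounding tests.
#[test]
fn monitor_update_completed_claim_sketch() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// With the persister returning InProgress, the claim's update_fulfill_htlc is withheld.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Completing the update releases the held fulfill and the payment resolves normally.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
	expect_payment_sent!(nodes[0], payment_preimage);
}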
1763
1764 #[test]
1765 fn monitor_update_claim_fail_no_response() {
1766         // Test for claim_funds resulting in both a monitor update failure and no message response (due
1767 	// to the channel being AwaitingRAA).
1768 	// Backported from chanmon_fail_consistency fuzz tests because an unmerged version of the handling
1769         // code was broken.
1770         let chanmon_cfgs = create_chanmon_cfgs(2);
1771         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1772         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1773         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1774         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1775
1776         // Forward a payment for B to claim
1777         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1778
1779         // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1780         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1781         {
1782                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1783                 check_added_monitors!(nodes[0], 1);
1784         }
1785
1786         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1787         assert_eq!(events.len(), 1);
1788         let payment_event = SendEvent::from_event(events.pop().unwrap());
1789         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1790         let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
1791
1792         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1793         nodes[1].node.claim_funds(payment_preimage_1);
1794         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1795         check_added_monitors!(nodes[1], 1);
1796
1797         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1798
1799         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1800         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1801         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1802         check_added_monitors!(nodes[1], 0);
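	// Even though the monitor update has now completed, the channel is still AwaitingRAA
	// for the second payment, so the claim's update_fulfill_htlc remains in the holding
	// cell until the revoke_and_ack below is delivered.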
1803         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1804
1805         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1806         check_added_monitors!(nodes[1], 1);
1807         expect_pending_htlcs_forwardable!(nodes[1]);
1808         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1809
1810         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1811         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1812         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1813         expect_payment_sent!(nodes[0], payment_preimage_1);
1814
1815         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1816 }
1817
1818 // restore_b_before_conf has no meaning if !confirm_a_first
1819 // restore_b_before_lock has no meaning if confirm_a_first
1820 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1821         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1822         // the channel setup happily after the update is restored.
1823         let chanmon_cfgs = create_chanmon_cfgs(2);
1824         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1825         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1826         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1827
1828         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1829         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1830         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1831
1832         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1833
1834         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1835         check_added_monitors!(nodes[0], 0);
1836
1837         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1838         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1839         let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1840         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1841         check_added_monitors!(nodes[1], 1);
1842
1843         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1844         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1845         check_added_monitors!(nodes[0], 1);
1846         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1847         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1848         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1849         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1850         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1851         check_added_monitors!(nodes[0], 0);
1852
1853         let events = nodes[0].node.get_and_clear_pending_events();
1854         assert_eq!(events.len(), 0);
1855         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1856         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1857
1858         if confirm_a_first {
1859                 confirm_transaction(&nodes[0], &funding_tx);
1860                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1861                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1862                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1863         } else {
1864                 assert!(!restore_b_before_conf);
1865                 confirm_transaction(&nodes[1], &funding_tx);
1866                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1867         }
1868
1869         // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1870         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1871         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1872         reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1873         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1874         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1875
1876         if !restore_b_before_conf {
1877                 confirm_transaction(&nodes[1], &funding_tx);
1878                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1879                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1880         }
1881         if !confirm_a_first && !restore_b_before_lock {
1882                 confirm_transaction(&nodes[0], &funding_tx);
1883                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1884                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1885                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1886         }
1887
1888         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1889         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1890         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1891         check_added_monitors!(nodes[1], 0);
1892
1893         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1894                 if !restore_b_before_lock {
1895                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1896                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1897                 } else {
1898                         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1899                         confirm_transaction(&nodes[0], &funding_tx);
1900                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1901                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
1902                 }
1903         } else {
1904                 if restore_b_before_conf {
1905                         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1906                         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1907                         confirm_transaction(&nodes[1], &funding_tx);
1908                 }
1909                 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1910                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1911         };
1912         for node in nodes.iter() {
1913                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1914                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1915                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
1916         }
1917
1918         if !restore_b_before_lock {
1919                 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
1920         } else {
1921                 expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
1922         }
1923
1924
1925         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1926         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1927         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1928         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1929 }
1930
1931 #[test]
1932 fn during_funding_monitor_fail() {
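	// Only the meaningful flag combinations are exercised: restore_b_before_conf matters
	// only when confirm_a_first is set, and restore_b_before_lock only when it is not (see
	// the notes on do_during_funding_monitor_fail above).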
1933         do_during_funding_monitor_fail(true, true, false);
1934         do_during_funding_monitor_fail(true, false, false);
1935         do_during_funding_monitor_fail(false, false, false);
1936         do_during_funding_monitor_fail(false, false, true);
1937 }
1938
1939 #[test]
1940 fn test_path_paused_mpp() {
1941 	// Simple test of sending a multi-part payment where one path is currently blocked awaiting a
1942 	// monitor update.
1943         let chanmon_cfgs = create_chanmon_cfgs(4);
1944         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1945         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1946         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1947
1948         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
1949         let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1950         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
1951         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
1952
1953         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
1954
1955         // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
1956         let path = route.paths[0].clone();
1957         route.paths.push(path);
1958         route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
1959         route.paths[0][0].short_channel_id = chan_1_id;
1960         route.paths[0][1].short_channel_id = chan_3_id;
1961         route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
1962         route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
1963         route.paths[1][1].short_channel_id = chan_4_id;
1964
1965         // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
1966         // (for the path 0 -> 2 -> 3) fails.
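	// (Each set_update_ret call here queues one status for the TestPersister to return, so
	// the two calls below line up with the two per-path monitor updates, in order.)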
1967         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1968         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1969
1970         // Now check that we get the right return value, indicating that the first path succeeded but
1971         // the second got a MonitorUpdateInProgress err. This implies
1972         // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
1973         if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)) {
1974                 assert_eq!(results.len(), 2);
1975                 if let Ok(()) = results[0] {} else { panic!(); }
1976                 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
1977         } else { panic!(); }
1978         check_added_monitors!(nodes[0], 2);
1979         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1980
1981         // Pass the first HTLC of the payment along to nodes[3].
1982         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1983         assert_eq!(events.len(), 1);
1984         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
1985
1986         // And check that, after we successfully update the monitor for chan_2 we can pass the second
1987         // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
1988         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
1989         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1990         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1991         assert_eq!(events.len(), 1);
1992         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
1993
1994         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
1995 }
1996
1997 #[test]
1998 fn test_pending_update_fee_ack_on_reconnect() {
1999         // In early versions of our automated fee update patch, nodes did not correctly use the
2000         // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2001         // undelivered commitment_signed.
2002         //
2003         // B sends A new HTLC + CS, not delivered
2004         // A sends B update_fee + CS
2005         // B receives the CS and sends RAA, previously causing B to lock in the new feerate
2006         // reconnect
2007         // B resends initial CS, using the original fee
2008
2009         let chanmon_cfgs = create_chanmon_cfgs(2);
2010         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2011         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2012         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2013
2014         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
2015 	send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
2016
2017         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2018         nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
2019         check_added_monitors!(nodes[1], 1);
2020         let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2021         // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
2022
2023         {
2024                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2025                 *feerate_lock *= 2;
2026         }
2027         nodes[0].node.timer_tick_occurred();
2028         check_added_monitors!(nodes[0], 1);
2029         let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2030         assert!(as_update_fee_msgs.update_fee.is_some());
2031
2032         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2033         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2034         check_added_monitors!(nodes[1], 1);
2035         let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2036         // bs_first_raa is not delivered until it is re-generated after reconnect
2037
2038         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2039         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2040
2041         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
2042         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2043         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
2044         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2045
2046         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2047         let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2048         assert_eq!(bs_resend_msgs.len(), 3);
2049         if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2050                 assert_eq!(*updates, bs_initial_send_msgs);
2051         } else { panic!(); }
2052         if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2053                 assert_eq!(*msg, bs_first_raa);
2054         } else { panic!(); }
2055         if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
2056
2057         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2058         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2059
2060         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2061         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2062         check_added_monitors!(nodes[0], 1);
2063         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2064         check_added_monitors!(nodes[1], 1);
2065         let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2066
2067         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2068         check_added_monitors!(nodes[0], 1);
2069         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2070         check_added_monitors!(nodes[1], 1);
2071         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2072
2073         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2074         check_added_monitors!(nodes[0], 1);
2075         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2076         check_added_monitors!(nodes[0], 1);
2077
2078         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2079         check_added_monitors!(nodes[1], 1);
2080
2081         expect_pending_htlcs_forwardable!(nodes[0]);
2082         expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2083
2084         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
2085 }
2086
2087 #[test]
2088 fn test_fail_htlc_on_broadcast_after_claim() {
2089 	// In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the HTLC
2090 	// backwards on the inbound channel if we received an HTLC failure after an HTLC fulfillment. Here we test a
2091         // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2092         // HTLC was not included in a confirmed commitment transaction.
2093         //
2094         // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2095         // channel immediately before commitment occurs. After the commitment transaction reaches
2096 	// ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
2097         let chanmon_cfgs = create_chanmon_cfgs(3);
2098         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2099         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2100         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2101
2102         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
2103         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
2104
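	// 2000 msat is well below the dust limit, so this HTLC will not appear as an output in
	// nodes[2]'s commitment transaction.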
2105         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2106
2107         let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2108         assert_eq!(bs_txn.len(), 1);
2109
2110         nodes[2].node.claim_funds(payment_preimage);
2111         check_added_monitors!(nodes[2], 1);
2112         expect_payment_claimed!(nodes[2], payment_hash, 2000);
2113
2114         let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2115         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2116         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2117         check_added_monitors!(nodes[1], 1);
2118         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2119
2120         mine_transaction(&nodes[1], &bs_txn[0]);
2121         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2122         check_closed_broadcast!(nodes[1], true);
2123         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2124         check_added_monitors!(nodes[1], 1);
2125         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2126
2127         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2128         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2129         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2130         expect_payment_path_successful!(nodes[0]);
2131 }
2132
2133 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2134         // In early versions we did not handle resending of update_fee on reconnect correctly. The
2135         // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2136         // explicitly here.
2137         let chanmon_cfgs = create_chanmon_cfgs(2);
2138         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2139         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2140         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2141
2142         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
2143         send_payment(&nodes[0], &[&nodes[1]], 1000);
2144
2145         {
2146                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2147                 *feerate_lock += 20;
2148         }
2149         nodes[0].node.timer_tick_occurred();
2150         check_added_monitors!(nodes[0], 1);
2151         let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2152         assert!(update_msgs.update_fee.is_some());
2153         if deliver_update {
2154                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2155         }
2156
2157         if parallel_updates {
2158                 {
2159                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2160                         *feerate_lock += 20;
2161                 }
2162                 nodes[0].node.timer_tick_occurred();
2163                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2164         }
2165
2166         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2167         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2168
2169         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
2170         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2171         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
2172         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2173
2174         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2175         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2176         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2177
2178         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2179         let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2180         assert_eq!(as_reconnect_msgs.len(), 2);
2181         if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2182         let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2183                 { updates } else { panic!(); };
2184         assert!(update_msgs.update_fee.is_some());
2185         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2186         if parallel_updates {
2187                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2188                 check_added_monitors!(nodes[1], 1);
2189                 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2190                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2191                 check_added_monitors!(nodes[0], 1);
2192                 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2193
2194                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2195                 check_added_monitors!(nodes[0], 1);
2196                 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2197
2198                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2199                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2200                 check_added_monitors!(nodes[1], 1);
2201                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2202
2203                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2204                 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2205                 check_added_monitors!(nodes[1], 1);
2206
2207                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2208                 check_added_monitors!(nodes[0], 1);
2209
2210                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2211                 check_added_monitors!(nodes[0], 1);
2212                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2213
2214                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2215                 check_added_monitors!(nodes[1], 1);
2216         } else {
2217                 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2218         }
2219
2220         send_payment(&nodes[0], &[&nodes[1]], 1000);
2221 }
2222 #[test]
2223 fn update_fee_resend_test() {
2224         do_update_fee_resend_test(false, false);
2225         do_update_fee_resend_test(true, false);
2226         do_update_fee_resend_test(false, true);
2227         do_update_fee_resend_test(true, true);
2228 }
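
// A minimal sketch, not an upstream test (the name is illustrative): the trivial
// update_fee flow with no disconnection, for contrast with the resend cases above. All of
// the calls appear verbatim in do_update_fee_resend_test.
#[test]
fn update_fee_simple_delivery_sketch() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	// Bump the feerate so the next timer tick generates an update_fee.
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(update_msgs.update_fee.is_some());

	// Deliver it and complete the commitment dance; the channel remains usable afterwards.
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
	commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
	send_payment(&nodes[0], &[&nodes[1]], 1000);
}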
2229
2230 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2231         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2232         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2233         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2234         // which failed in such a case).
2235         let chanmon_cfgs = create_chanmon_cfgs(2);
2236         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2237         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2238         let persister: test_utils::TestPersister;
2239         let new_chain_monitor: test_utils::TestChainMonitor;
2240         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
2241         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2242
2243         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
2244         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2245         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2246
2247         // Do a really complicated dance to get an HTLC into the holding cell, with
2248         // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2249 	// attempts to send an HTLC while MonitorUpdateInProgress was set were immediately
2250         // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2251         // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2252         // flags.
2253         //
2254         // We do this by:
2255         //  a) routing a payment from node B to node A,
2256         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2257         //     putting node A in AwaitingRemoteRevoke,
2258         //  c) sending a second payment from node A to node B, which is immediately placed in the
2259         //     holding cell,
2260         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2261         //     when we try to persist the payment preimage,
2262         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2263         //     clearing AwaitingRemoteRevoke on node A.
2264         //
2265         // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2266         // (c) will not be freed from the holding cell.
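	// Step (a): route a payment from nodes[1] (B) to nodes[0] (A).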
2267         let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
2268
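	// Step (b): send a payment A -> B, but don't deliver any of the resulting messages,
	// leaving A in AwaitingRemoteRevoke.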
2269         nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2270         check_added_monitors!(nodes[0], 1);
2271         let send = SendEvent::from_node(&nodes[0]);
2272         assert_eq!(send.msgs.len(), 1);
2273
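	// Step (c): a second A -> B payment now goes straight into A's holding cell.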
2274         nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2275         check_added_monitors!(nodes[0], 0);
2276
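	// Step (d): claim the payment routed in (a), leaving the monitor update which persists
	// the preimage in-flight (the second queued InProgress covers the update generated when
	// the revoke_and_ack is handled below).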
2277         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2278         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2279         nodes[0].node.claim_funds(payment_preimage_0);
2280         check_added_monitors!(nodes[0], 1);
2281         expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2282
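	// Step (e): deliver A's commitment_signed from (b) and B's resulting revoke_and_ack,
	// clearing AwaitingRemoteRevoke on A.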
2283         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2284         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2285         check_added_monitors!(nodes[1], 1);
2286
2287         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2288
2289         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2290         check_added_monitors!(nodes[0], 1);
2291
2292         if disconnect {
2293                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2294                 // disconnect the peers. Note that the fuzzer originally found this issue because
2295                 // deserializing a ChannelManager in this state causes an assertion failure.
2296                 if reload_a {
2297                         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2298                         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2299                 } else {
2300                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2301                 }
2302                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2303
2304                 // Now reconnect the two
2305                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
2306                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2307                 assert_eq!(reestablish_1.len(), 1);
2308                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
2309                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2310                 assert_eq!(reestablish_2.len(), 1);
2311
2312                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2313                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2314                 check_added_monitors!(nodes[1], 0);
2315
2316                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2317                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2318
2319                 assert!(resp_0.0.is_none());
2320                 assert!(resp_0.1.is_none());
2321                 assert!(resp_0.2.is_none());
2322                 assert!(resp_1.0.is_none());
2323                 assert!(resp_1.1.is_none());
2324
2325                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2326                 // moment).
2327                 if let Some(pending_cs) = resp_1.2 {
2328                         assert!(pending_cs.update_add_htlcs.is_empty());
2329                         assert!(pending_cs.update_fail_htlcs.is_empty());
2330                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2331                         assert_eq!(pending_cs.commitment_signed, cs);
2332                 } else { panic!(); }
2333
2334 		// There should be no monitor updates, as we are still waiting on the in-flight one to complete.
2335                 check_added_monitors!(nodes[0], 0);
2336                 check_added_monitors!(nodes[1], 0);
2337         }
2338
2339         // If we finish updating the monitor, we should free the holding cell right away (this did
2340         // not occur prior to #756).
2341         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2342         let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2343         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2344
2345         // New outbound messages should be generated immediately upon a call to
2346         // get_and_clear_pending_msg_events (but not before).
2347         check_added_monitors!(nodes[0], 0);
2348         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2349         check_added_monitors!(nodes[0], 1);
2350         assert_eq!(events.len(), 1);
2351
2352         // Deliver the pending in-flight CS
2353         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2354         check_added_monitors!(nodes[0], 1);
2355
2356         let commitment_msg = match events.pop().unwrap() {
2357                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2358                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2359                         assert!(updates.update_fail_htlcs.is_empty());
2360                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2361                         assert!(updates.update_fee.is_none());
2362                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2363                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2364                         expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
2365                         assert_eq!(updates.update_add_htlcs.len(), 1);
2366                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2367                         updates.commitment_signed
2368                 },
2369                 _ => panic!("Unexpected event type!"),
2370         };
2371
2372         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2373         check_added_monitors!(nodes[1], 1);
2374
2375         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2376         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2377         expect_pending_htlcs_forwardable!(nodes[1]);
2378         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2379         check_added_monitors!(nodes[1], 1);
2380
2381         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
2382
2383         let events = nodes[1].node.get_and_clear_pending_events();
2384         assert_eq!(events.len(), 2);
2385         match events[0] {
2386                 Event::PendingHTLCsForwardable { .. } => { },
2387                 _ => panic!("Unexpected event"),
2388         };
2389         match events[1] {
2390                 Event::PaymentPathSuccessful { .. } => { },
2391                 _ => panic!("Unexpected event"),
2392         };
2393
2394         nodes[1].node.process_pending_htlc_forwards();
2395         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2396
2397         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2398         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2399 }
2400 #[test]
2401 fn channel_holding_cell_serialize() {
2402         do_channel_holding_cell_serialize(true, true);
2403         do_channel_holding_cell_serialize(true, false);
2404         do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2405 }
2406
2407 #[derive(PartialEq)]
2408 enum HTLCStatusAtDupClaim {
2409         Received,
2410         HoldingCell,
2411         Cleared,
2412 }
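// The variants above name where nodes[1]'s update_fulfill_htlc sits when the
// nodes[1]/nodes[2] disconnection happens: `Received` means nodes[0] got the fulfill but
// the commitment dance never completed, `HoldingCell` means the fulfill is still trapped
// awaiting nodes[0]'s revoke_and_ack, and `Cleared` means the full commitment_signed dance
// finished before the disconnect.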
2413 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2414         // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2415         // along the payment path before waiting for a full commitment_signed dance. This is great, but
2416         // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2417 	// reconnects, and then has to re-send its update_fulfill_htlc message.
2418         // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2419         // channel on which the inbound HTLC was received.
2420         let chanmon_cfgs = create_chanmon_cfgs(3);
2421         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2422         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2423         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2424
2425         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
2426         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
2427
2428         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2429
2430         let mut as_raa = None;
2431         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2432                 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2433                 // awaiting a remote revoke_and_ack from nodes[0].
2434                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2435                 nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2436                 check_added_monitors!(nodes[0], 1);
2437
2438                 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2439                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2440                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2441                 check_added_monitors!(nodes[1], 1);
2442
2443                 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2444                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2445                 check_added_monitors!(nodes[0], 1);
2446                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2447                 check_added_monitors!(nodes[0], 1);
2448
2449                 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2450         }
2451
2452         let fulfill_msg = msgs::UpdateFulfillHTLC {
2453                 channel_id: chan_id_2,
2454                 htlc_id: 0,
2455                 payment_preimage,
2456         };
2457         if second_fails {
2458                 nodes[2].node.fail_htlc_backwards(&payment_hash);
2459                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2460                 check_added_monitors!(nodes[2], 1);
2461                 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2462         } else {
2463                 nodes[2].node.claim_funds(payment_preimage);
2464                 check_added_monitors!(nodes[2], 1);
2465                 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2466
2467                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2468                 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2469                 // Check that the message we're about to deliver matches the one generated:
2470                 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2471         }
2472         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2473         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2474         check_added_monitors!(nodes[1], 1);
2475
2476         let mut bs_updates = None;
2477         if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2478                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2479                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2480                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2481                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2482                 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2483                         commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2484                         expect_payment_path_successful!(nodes[0]);
2485                 }
2486         } else {
2487                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2488         }
2489
2490         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
2491         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2492
2493         if second_fails {
2494                 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
2495                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2496         } else {
2497                 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
2498         }
2499
2500         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2501                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2502                 check_added_monitors!(nodes[1], 1);
2503                 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2504
2505                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2506                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2507                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2508                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2509         }
2510         if htlc_status != HTLCStatusAtDupClaim::Cleared {
2511                 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2512                 expect_payment_path_successful!(nodes[0]);
2513         }
2514 }
2515
2516 #[test]
2517 fn test_reconnect_dup_htlc_claims() {
2518         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2519         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2520         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2521         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2522         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2523         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2524 }
2525
2526 #[test]
2527 fn test_temporary_error_during_shutdown() {
2528         // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2529         // close.
2530         let mut config = test_default_channel_config();
2531         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2532
2533         let chanmon_cfgs = create_chanmon_cfgs(2);
2534         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2535         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2536         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2537
2538         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
2539
2540         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2541         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2542
2543         nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2544         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2545         check_added_monitors!(nodes[1], 1);
2546
2547         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2548         check_added_monitors!(nodes[0], 1);
2549
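	// Neither side will move on to closing_signed while its shutdown-script monitor update
	// is still pending.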
2550         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2551
2552         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2553         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2554
2555         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2556         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2557         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2558
2559         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2560
2561         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2562         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2563         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2564
	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
	let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
	let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
	let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
	assert!(none_b.is_none());
	let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	assert_eq!(txn_a, txn_b);
	assert_eq!(txn_a.len(), 1);
	check_spends!(txn_a[0], funding_tx);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
}

#[test]
fn test_permanent_error_during_sending_shutdown() {
	// Test that permanent failures when updating the monitor's shutdown script result in a force
	// close when initiating a cooperative close.
	let mut config = test_default_channel_config();
	config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

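	// The monitor update persisting the shutdown script hits the permanent failure, so
	// initiating the cooperative close force-closes the channel instead.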
	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 2);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn test_permanent_error_during_handling_shutdown() {
	// Test that permanent failures when updating the monitor's shutdown script result in a force
	// close when handling a cooperative close.
	let mut config = test_default_channel_config();
	config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

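	// Here the failing monitor update happens on the receiving side: nodes[1] persists its
	// shutdown script while handling its peer's shutdown, and force-closes when that fails.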
	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
	let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &shutdown);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 2);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn double_temp_error() {
	// Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	// `claim_funds` results in a ChannelMonitorUpdate.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
	let (funding_outpoint, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
	// which had some asserts that prevented it from being called twice.
	nodes[1].node.claim_funds(payment_preimage_2);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);

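	// Complete the two pending monitor updates in order; no messages should be released until
	// the latest update has completed.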
	let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_outpoint, latest_update_1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_outpoint, latest_update_2);

	// Complete the first HTLC.
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let (update_fulfill_1, commitment_signed_b1, node_id) = {
		match &events[0] {
			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				(update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
			},
			_ => panic!("Unexpected event"),
		}
	};
	assert_eq!(node_id, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
	check_added_monitors!(nodes[0], 0);
	expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.process_pending_htlc_forwards();
	let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
	check_added_monitors!(nodes[1], 1);

	// Complete the second HTLC.
	let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 2);
		(match &events[0] {
			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				assert!(updates.update_add_htlcs.is_empty());
				assert!(updates.update_fail_htlcs.is_empty());
				assert!(updates.update_fail_malformed_htlcs.is_empty());
				assert!(updates.update_fee.is_none());
				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
				(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
			},
			_ => panic!("Unexpected event"),
		},
		match events[1] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				(*msg).clone()
			},
			_ => panic!("Unexpected event"),
		})
	};
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
	expect_payment_sent!(nodes[0], payment_preimage_2);
}

fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
	// Test that if the monitor update generated in funding_signed is stored async and we restart
	// with the latest ChannelManager, but the ChannelMonitor persistence never completed, we
	// happily drop the channel and move on.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

	let mut chan_config = test_default_channel_config();
	chan_config.manually_accept_inbound_channels = true;
	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

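	// With manual acceptance enabled, nodes[1] sees an OpenChannelRequest and accepts it,
	// trusting it as 0conf when the test variant calls for it.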
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			if use_0conf {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			} else {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			}
		},
		_ => panic!("Unexpected event"),
	};

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);

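	// nodes[1] responds with funding_signed (plus an immediate channel_ready in the 0conf case).
	// nodes[0]'s initial monitor persistence is left in-progress while it handles them.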
	let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
	match &bs_signed_locked[0] {
		MessageSendEvent::SendFundingSigned { msg, .. } => {
			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

			nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		}
		_ => panic!("Unexpected event"),
	}
	if use_0conf {
		match &bs_signed_locked[1] {
			MessageSendEvent::SendChannelReady { msg, .. } => {
				nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
			}
			_ => panic!("Unexpected event"),
		}
	}

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

	// nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
	// broadcast the funding transaction. If nodes[0] restarts at this point with the
	// ChannelMonitor lost, we should simply discard the channel.

	// The test framework checks that watched_txn/outputs match the monitor set, which they will
	// not, so we have to clear them here.
	nodes[0].chain_source.watched_txn.lock().unwrap().clear();
	nodes[0].chain_source.watched_outputs.lock().unwrap().clear();

	reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
	assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn test_outbound_reload_without_init_mon() {
	do_test_outbound_reload_without_init_mon(true);
	do_test_outbound_reload_without_init_mon(false);
}

fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
	// Test that if the monitor update generated when nodes[1] handles funding_created is stored
	// async and we restart with the latest ChannelManager, but the ChannelMonitor persistence
	// never completed, we happily drop the channel and move on.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

	let mut chan_config = test_default_channel_config();
	chan_config.manually_accept_inbound_channels = true;
	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			if use_0conf {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			} else {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			}
		},
		_ => panic!("Unexpected event"),
	};

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

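	// Leave nodes[1]'s initial ChannelMonitor persistence pending while it handles
	// funding_created.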
	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);

	// nodes[1] happily sends its funding_signed even though it's awaiting the persistence of the
	// initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
	// transaction is confirmed.
	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors!(nodes[0], 1);

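	// Having received funding_signed, nodes[0] broadcasts the funding transaction; nodes[1]'s
	// initial monitor write is still pending.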
	let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	if lock_commitment {
		confirm_transaction(&nodes[0], &as_funding_tx[0]);
		confirm_transaction(&nodes[1], &as_funding_tx[0]);
	}
	if use_0conf || lock_commitment {
		let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
	}
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
	// move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
	// restarts at this point with the ChannelMonitor lost, we should simply discard the channel.

	// The test framework checks that watched_txn/outputs match the monitor set, which they will
	// not, so we have to clear them here.
	nodes[1].chain_source.watched_txn.lock().unwrap().clear();
	nodes[1].chain_source.watched_outputs.lock().unwrap().clear();

	reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);

	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
	assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_inbound_reload_without_init_mon() {
	do_test_inbound_reload_without_init_mon(true, true);
	do_test_inbound_reload_without_init_mon(true, false);
	do_test_inbound_reload_without_init_mon(false, true);
	do_test_inbound_reload_without_init_mon(false, false);
}