Merge pull request #1107 from dunxen/2021-10-swap-pubkey-for-bytearray
[rust-lightning] / lightning / src / ln / chanmon_update_fail_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
11 //! monitor updates.
12 //! There are a bunch of these as their handling is relatively error-prone so they are split out
13 //! here. See also the chanmon_fail_consistency fuzz test.
14
15 use bitcoin::blockdata::block::{Block, BlockHeader};
16 use bitcoin::blockdata::constants::genesis_block;
17 use bitcoin::hash_types::BlockHash;
18 use bitcoin::network::constants::Network;
19 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr};
20 use chain::transaction::OutPoint;
21 use chain::Listen;
22 use chain::Watch;
23 use ln::{PaymentPreimage, PaymentHash};
24 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
25 use ln::features::{InitFeatures, InvoiceFeatures};
26 use ln::msgs;
27 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
28 use routing::router::get_route;
29 use util::config::UserConfig;
30 use util::enforcing_trait_impls::EnforcingSigner;
31 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
32 use util::errors::APIError;
33 use util::ser::{ReadableArgs, Writeable};
34 use util::test_utils::TestBroadcaster;
35
36 use bitcoin::hashes::sha256::Hash as Sha256;
37 use bitcoin::hashes::Hash;
38
39 use ln::functional_test_utils::*;
40
41 use util::test_utils;
42
43 use io;
44 use prelude::*;
45 use sync::{Arc, Mutex};
46
47 // If persister_fail is true, we have the persister return a PermanentFailure
48 // instead of the higher-level ChainMonitor.
49 fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
50         // Test that we handle a simple permanent monitor update failure
           // by closing the channel: after a PermanentFailure the channel is gone
           // (list_channels() is empty) and we tell the peer via an error message.
51         let mut chanmon_cfgs = create_chanmon_cfgs(2);
52         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
53         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
54         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
55         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
56         let logger = test_utils::TestLogger::new();
57
58         let (_, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]);
59
           // Arrange for nodes[0]'s next monitor update to return PermanentFailure,
           // injected either at the persister or at the ChainMonitor depending on
           // which variant of the test we are running.
60         match persister_fail {
61                 true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)),
62                 false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure))
63         }
64         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
65         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
           // The send fails immediately and surfaces as ChannelUnavailable to the
           // caller, since the monitor update failure forced the channel closed.
66         unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
           // Two monitor updates are expected here — presumably the failed payment
           // update plus the closure-related update; exact breakdown is not visible
           // from this file (NOTE(review): confirm against ChannelManager internals).
67         check_added_monitors!(nodes[0], 2);
68
           // Closing the channel produces exactly two messages: a channel_update
           // broadcast marking the channel unusable, and an error sent to nodes[1].
69         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
70         assert_eq!(events_1.len(), 2);
71         match events_1[0] {
72                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
73                 _ => panic!("Unexpected event"),
74         };
75         match events_1[1] {
76                 MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
77                 _ => panic!("Unexpected event"),
78         };
79
80         // TODO: Once we hit the chain with the failure transaction we should check that we get a
81         // PaymentPathFailed event
82
           // The channel no longer exists, and the closure reason records the
           // monitor storage failure.
83         assert_eq!(nodes[0].node.list_channels().len(), 0);
84         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
85 }
86
87 #[test]
88 fn test_monitor_and_persister_update_fail() {
89         // Test that if both updating the `ChannelMonitor` and persisting the updated
90         // `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
91         // is the one that gets returned.
92         let chanmon_cfgs = create_chanmon_cfgs(2);
93         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
94         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
95         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
96
97         // Create some initial channel
98         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
99         let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };
100
101         // Rebalance the network to generate htlc in the two directions
102         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
103
104         // Route an HTLC from node 0 to node 1 (but don't settle)
105         let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
106
107         // Make a copy of the ChainMonitor so we can capture the error it returns on a
108         // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
109         // directly, the node would fail to be `Drop`'d at the end because its
110         // ChannelManager and ChainMonitor would be out of sync.
111         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
112         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
113         let persister = test_utils::TestPersister::new();
114         let tx_broadcaster = TestBroadcaster {
115                 txn_broadcasted: Mutex::new(Vec::new()),
116                 // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
117                 // that we are at height 200 so that it doesn't think we're violating the time lock
118                 // requirements of transactions broadcasted at that point.
119                 blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])),
120         };
           // Clone nodes[0]'s current monitor by round-tripping it through
           // serialization, then register the copy with a brand-new TestChainMonitor
           // that uses the failing persister set up above.
121         let chain_mon = {
122                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
123                 let monitor = monitors.get(&outpoint).unwrap();
124                 let mut w = test_utils::TestVecWriter(Vec::new());
125                 monitor.write(&mut w).unwrap();
126                 let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
127                         &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
128                 assert!(new_monitor == *monitor);
129                 let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
130                 assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
131                 chain_mon
132         };
           // Advance the copied monitor to height 200, matching the block height the
           // TestBroadcaster above was configured with.
133         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
134         chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
135
136         // Set the persister's return value to be a TemporaryFailure.
137         persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
138
139         // Try to update ChannelMonitor
140         assert!(nodes[1].node.claim_funds(preimage));
141         check_added_monitors!(nodes[1], 1);
142         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
143         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
144         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
145         if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
146                 if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
147                         // Check that even though the persister is returning a TemporaryFailure,
148                         // because the update is bogus, ultimately the error that's returned
149                         // should be a PermanentFailure.
150                         if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
                           // The persister's TemporaryFailure was still hit and logged, even
                           // though the PermanentFailure from the monitor won out.
151                         logger.assert_log_contains("lightning::chain::chainmonitor".to_string(), "Failed to persist channel monitor update: TemporaryFailure".to_string(), 1);
                           // Applying the same update to the real (non-copied) ChainMonitor
                           // succeeds, keeping ChannelManager and ChainMonitor in sync for Drop.
152                         if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
153                 } else { assert!(false); }
154         } else { assert!(false); };
155
156         check_added_monitors!(nodes[0], 1);
           // Exactly one pending event is expected from the fulfill above
           // (presumably PaymentSent — not asserted further here; TODO confirm).
157         let events = nodes[0].node.get_and_clear_pending_events();
158         assert_eq!(events.len(), 1);
159 }
160
#[test]
fn test_simple_monitor_permanent_update_fail() {
	// Exercise the permanent-failure path twice: first with the failure coming
	// from the ChainMonitor itself (false), then with the persister returning
	// the PermanentFailure (true). Each run builds an independent test network.
	for &persister_fail in &[false, true] {
		do_test_simple_monitor_permanent_update_fail(persister_fail);
	}
}
168
169 // If persister_fail is true, we have the persister return a TemporaryFailure instead of the
170 // higher-level ChainMonitor.
171 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail: bool) {
172         // Test that we can recover from a simple temporary monitor update failure optionally with
173         // a disconnect in between
174         let mut chanmon_cfgs = create_chanmon_cfgs(2);
175         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
176         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
177         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
178         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
179         let logger = test_utils::TestLogger::new();
180
181         let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]);
182
           // Inject a TemporaryFailure into nodes[0]'s next monitor update, either at
           // the persister or at the ChainMonitor depending on the test variant.
183         match persister_fail {
184                 true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)),
185                 false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure))
186         }
187
188         {
189                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
190                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
                   // The temporary failure surfaces as MonitorUpdateFailed to the sender.
191                 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
192                 check_added_monitors!(nodes[0], 1);
193         }
194
           // Nothing went out on the wire and no events fired, but — unlike the
           // permanent-failure case — the channel stays open (frozen) awaiting the
           // monitor update's completion.
195         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
196         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
197         assert_eq!(nodes[0].node.list_channels().len(), 1);
198
199         if disconnect {
200                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
201                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
202                 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
203         }
204
           // Restore the update path to success, then notify the ChannelManager that
           // the latest monitor update completed; this unfreezes the channel and
           // releases the queued payment below.
205         match persister_fail {
206                 true => chanmon_cfgs[0].persister.set_update_ret(Ok(())),
207                 false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()))
208         }
209         let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
210         nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
211         check_added_monitors!(nodes[0], 0);
212
           // The previously-queued HTLC now goes out to nodes[1] and completes the
           // usual add/commitment dance.
213         let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
214         assert_eq!(events_2.len(), 1);
215         let payment_event = SendEvent::from_event(events_2.pop().unwrap());
216         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
217         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
218         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
219
220         expect_pending_htlcs_forwardable!(nodes[1]);
221
           // nodes[1] receives the payment with the expected hash/amount/secret.
222         let events_3 = nodes[1].node.get_and_clear_pending_events();
223         assert_eq!(events_3.len(), 1);
224         match events_3[0] {
225                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
226                         assert_eq!(payment_hash_1, *payment_hash);
227                         assert_eq!(amt, 1000000);
228                         match &purpose {
229                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
230                                         assert!(payment_preimage.is_none());
231                                         assert_eq!(payment_secret_1, *payment_secret);
232                                 },
233                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
234                         }
235                 },
236                 _ => panic!("Unexpected event"),
237         }
238
239         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
240
241         // Now set it to failed again...
242         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
243         {
244                 match persister_fail {
245                         true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)),
246                         false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure))
247                 }
248                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
249                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
250                 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
251                 check_added_monitors!(nodes[0], 1);
252         }
253
           // As before: channel frozen, nothing sent, but still open.
254         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
255         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
256         assert_eq!(nodes[0].node.list_channels().len(), 1);
257
258         if disconnect {
259                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
260                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
261                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
262         }
263
264         // ...and make sure we can force-close a frozen channel
265         nodes[0].node.force_close_channel(&channel_id).unwrap();
266         check_added_monitors!(nodes[0], 1);
267         check_closed_broadcast!(nodes[0], true);
268
269         // TODO: Once we hit the chain with the failure transaction we should check that we get a
270         // PaymentPathFailed event
271
272         assert_eq!(nodes[0].node.list_channels().len(), 0);
273         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
274 }
275
#[test]
fn test_simple_monitor_temporary_update_fail() {
	// Cover all four (disconnect, persister_fail) combinations: the outer loop
	// selects where the TemporaryFailure is injected (ChainMonitor first, then
	// the persister), the inner loop toggles a disconnect mid-test. Each call
	// constructs a fresh, independent network, so the iteration order only
	// mirrors the original call sequence.
	for &persister_fail in &[false, true] {
		for &disconnect in &[false, true] {
			do_test_simple_monitor_temporary_update_fail(disconnect, persister_fail);
		}
	}
}
285
286 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
287         let disconnect_flags = 8 | 16;
288
289         // Test that we can recover from a temporary monitor update failure with some in-flight
290         // HTLCs going on at the same time potentially with some disconnection thrown in.
291         // * First we route a payment, then get a temporary monitor update failure when trying to
292         //   route a second payment. We then claim the first payment.
293         // * If disconnect_count is set, we will disconnect at this point (which is likely as
294         //   TemporaryFailure likely indicates net disconnect which resulted in failing to update
295         //   the ChannelMonitor on a watchtower).
296         // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
297         //   immediately, otherwise we wait disconnect and deliver them via the reconnect
298         //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
299         //   disconnect_count & !disconnect_flags is 0).
300         // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
301         //   through message sending, potentially disconnect/reconnecting multiple times based on
302         //   disconnect_count, to get the update_fulfill_htlc through.
303         // * We then walk through more message exchanges to get the original update_add_htlc
304         //   through, swapping message ordering based on disconnect_count & 8 and optionally
305         //   disconnect/reconnecting based on disconnect_count.
306         let chanmon_cfgs = create_chanmon_cfgs(2);
307         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
308         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
309         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
310         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
311         let logger = test_utils::TestLogger::new();
312
313         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
314
315         // Now try to send a second payment which will fail to send
316         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
317         {
318                 *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
319                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
320                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
321                 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
322                 check_added_monitors!(nodes[0], 1);
323         }
324
325         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
326         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
327         assert_eq!(nodes[0].node.list_channels().len(), 1);
328
329         // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
330         // but nodes[0] won't respond since it is frozen.
331         assert!(nodes[1].node.claim_funds(payment_preimage_1));
332         check_added_monitors!(nodes[1], 1);
333         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
334         assert_eq!(events_2.len(), 1);
335         let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
336                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
337                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
338                         assert!(update_add_htlcs.is_empty());
339                         assert_eq!(update_fulfill_htlcs.len(), 1);
340                         assert!(update_fail_htlcs.is_empty());
341                         assert!(update_fail_malformed_htlcs.is_empty());
342                         assert!(update_fee.is_none());
343
344                         if (disconnect_count & 16) == 0 {
345                                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
346                                 let events_3 = nodes[0].node.get_and_clear_pending_events();
347                                 assert_eq!(events_3.len(), 1);
348                                 match events_3[0] {
349                                         Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
350                                                 assert_eq!(*payment_preimage, payment_preimage_1);
351                                                 assert_eq!(*payment_hash, payment_hash_1);
352                                         },
353                                         _ => panic!("Unexpected event"),
354                                 }
355
356                                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
357                                 check_added_monitors!(nodes[0], 1);
358                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
359                                 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
360                         }
361
362                         (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
363                 },
364                 _ => panic!("Unexpected event"),
365         };
366
367         if disconnect_count & !disconnect_flags > 0 {
368                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
369                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
370         }
371
372         // Now fix monitor updating...
373         *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
374         let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
375         nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
376         check_added_monitors!(nodes[0], 0);
377
378         macro_rules! disconnect_reconnect_peers { () => { {
379                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
380                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
381
382                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
383                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
384                 assert_eq!(reestablish_1.len(), 1);
385                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
386                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
387                 assert_eq!(reestablish_2.len(), 1);
388
389                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
390                 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
391                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
392                 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
393
394                 assert!(as_resp.0.is_none());
395                 assert!(bs_resp.0.is_none());
396
397                 (reestablish_1, reestablish_2, as_resp, bs_resp)
398         } } }
399
400         let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
401                 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
402                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
403
404                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
405                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
406                 assert_eq!(reestablish_1.len(), 1);
407                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
408                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
409                 assert_eq!(reestablish_2.len(), 1);
410
411                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
412                 check_added_monitors!(nodes[0], 0);
413                 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
414                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
415                 check_added_monitors!(nodes[1], 0);
416                 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
417
418                 assert!(as_resp.0.is_none());
419                 assert!(bs_resp.0.is_none());
420
421                 assert!(bs_resp.1.is_none());
422                 if (disconnect_count & 16) == 0 {
423                         assert!(bs_resp.2.is_none());
424
425                         assert!(as_resp.1.is_some());
426                         assert!(as_resp.2.is_some());
427                         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
428                 } else {
429                         assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
430                         assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
431                         assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
432                         assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
433                         assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
434                         assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
435
436                         assert!(as_resp.1.is_none());
437
438                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
439                         let events_3 = nodes[0].node.get_and_clear_pending_events();
440                         assert_eq!(events_3.len(), 1);
441                         match events_3[0] {
442                                 Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
443                                         assert_eq!(*payment_preimage, payment_preimage_1);
444                                         assert_eq!(*payment_hash, payment_hash_1);
445                                 },
446                                 _ => panic!("Unexpected event"),
447                         }
448
449                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
450                         let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
451                         // No commitment_signed so get_event_msg's assert(len == 1) passes
452                         check_added_monitors!(nodes[0], 1);
453
454                         as_resp.1 = Some(as_resp_raa);
455                         bs_resp.2 = None;
456                 }
457
458                 if disconnect_count & !disconnect_flags > 1 {
459                         let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
460
461                         if (disconnect_count & 16) == 0 {
462                                 assert!(reestablish_1 == second_reestablish_1);
463                                 assert!(reestablish_2 == second_reestablish_2);
464                         }
465                         assert!(as_resp == second_as_resp);
466                         assert!(bs_resp == second_bs_resp);
467                 }
468
469                 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
470         } else {
471                 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
472                 assert_eq!(events_4.len(), 2);
473                 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
474                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
475                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
476                                 msg.clone()
477                         },
478                         _ => panic!("Unexpected event"),
479                 })
480         };
481
482         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
483
484         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
485         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
486         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
487         // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
488         check_added_monitors!(nodes[1], 1);
489
490         if disconnect_count & !disconnect_flags > 2 {
491                 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
492
493                 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
494                 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
495
496                 assert!(as_resp.2.is_none());
497                 assert!(bs_resp.2.is_none());
498         }
499
500         let as_commitment_update;
501         let bs_second_commitment_update;
502
503         macro_rules! handle_bs_raa { () => {
504                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
505                 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
506                 assert!(as_commitment_update.update_add_htlcs.is_empty());
507                 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
508                 assert!(as_commitment_update.update_fail_htlcs.is_empty());
509                 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
510                 assert!(as_commitment_update.update_fee.is_none());
511                 check_added_monitors!(nodes[0], 1);
512         } }
513
514         macro_rules! handle_initial_raa { () => {
515                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
516                 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
517                 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
518                 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
519                 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
520                 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
521                 assert!(bs_second_commitment_update.update_fee.is_none());
522                 check_added_monitors!(nodes[1], 1);
523         } }
524
525         if (disconnect_count & 8) == 0 {
526                 handle_bs_raa!();
527
528                 if disconnect_count & !disconnect_flags > 3 {
529                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
530
531                         assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
532                         assert!(bs_resp.1.is_none());
533
534                         assert!(as_resp.2.unwrap() == as_commitment_update);
535                         assert!(bs_resp.2.is_none());
536
537                         assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
538                 }
539
540                 handle_initial_raa!();
541
542                 if disconnect_count & !disconnect_flags > 4 {
543                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
544
545                         assert!(as_resp.1.is_none());
546                         assert!(bs_resp.1.is_none());
547
548                         assert!(as_resp.2.unwrap() == as_commitment_update);
549                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
550                 }
551         } else {
552                 handle_initial_raa!();
553
554                 if disconnect_count & !disconnect_flags > 3 {
555                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
556
557                         assert!(as_resp.1.is_none());
558                         assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
559
560                         assert!(as_resp.2.is_none());
561                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
562
563                         assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
564                 }
565
566                 handle_bs_raa!();
567
568                 if disconnect_count & !disconnect_flags > 4 {
569                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
570
571                         assert!(as_resp.1.is_none());
572                         assert!(bs_resp.1.is_none());
573
574                         assert!(as_resp.2.unwrap() == as_commitment_update);
575                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
576                 }
577         }
578
579         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
580         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
581         // No commitment_signed so get_event_msg's assert(len == 1) passes
582         check_added_monitors!(nodes[0], 1);
583
584         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
585         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
586         // No commitment_signed so get_event_msg's assert(len == 1) passes
587         check_added_monitors!(nodes[1], 1);
588
589         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
590         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
591         check_added_monitors!(nodes[1], 1);
592
593         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
594         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
595         check_added_monitors!(nodes[0], 1);
596
597         expect_pending_htlcs_forwardable!(nodes[1]);
598
599         let events_5 = nodes[1].node.get_and_clear_pending_events();
600         assert_eq!(events_5.len(), 1);
601         match events_5[0] {
602                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
603                         assert_eq!(payment_hash_2, *payment_hash);
604                         assert_eq!(amt, 1000000);
605                         match &purpose {
606                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
607                                         assert!(payment_preimage.is_none());
608                                         assert_eq!(payment_secret_2, *payment_secret);
609                                 },
610                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
611                         }
612                 },
613                 _ => panic!("Unexpected event"),
614         }
615
616         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
617 }
618
#[test]
fn test_monitor_temporary_update_fail_a() {
	// Exercise every disconnect_count permutation from 0 through 5 with no extra flag bits.
	for disconnect_count in 0..6 {
		do_test_monitor_temporary_update_fail(disconnect_count);
	}
}
628
#[test]
fn test_monitor_temporary_update_fail_b() {
	// Exercise disconnect_count values 2 through 5 with the 8 flag bit set.
	for base in 2..6 {
		do_test_monitor_temporary_update_fail(base | 8);
	}
}
636
#[test]
fn test_monitor_temporary_update_fail_c() {
	// Exercise the permutations which set the 16 flag bit (some also setting 8).
	for flags in &[1 | 16, 2 | 16, 3 | 16, 2 | 8 | 16, 3 | 8 | 16] {
		do_test_monitor_temporary_update_fail(*flags);
	}
}
645
#[test]
fn test_monitor_update_fail_cs() {
	// Tests handling of a monitor update failure when processing an incoming commitment_signed
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	// Route a 1M-msat payment from nodes[0] towards nodes[1], producing the initial
	// update_add_htlc + commitment_signed on nodes[0].
	let (payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

	// Make nodes[1]'s monitor update fail while it processes the commitment_signed: no
	// messages may be generated until the update is later marked complete.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Restore monitor updating and signal completion of the latest update; nodes[1] should
	// now release its revoke_and_ack followed by its own commitment update.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let responses = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(responses.len(), 2);

	match responses[0] {
		MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}
	match responses[1] {
		MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert!(updates.update_fail_htlcs.is_empty());
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());

			// Now fail nodes[0]'s monitor update in the same way while it handles
			// nodes[1]'s commitment_signed, pausing the A-side this time.
			*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
			check_added_monitors!(nodes[0], 1);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		},
		_ => panic!("Unexpected event"),
	}

	// Restore nodes[0]'s monitor updating; on completion it should release its final RAA.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	// With the full commitment dance complete, nodes[1] should surface the payment.
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentReceived { payment_hash, ref purpose, amt } => {
			assert_eq!(payment_hash, our_payment_hash);
			assert_eq!(amt, 1000000);
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}
738
#[test]
fn test_monitor_update_fail_no_rebroadcast() {
	// Tests handling of a monitor update failure when no message rebroadcasting on
	// channel_monitor_updated() is required. Backported from chanmon_fail_consistency
	// fuzz tests.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	// Route a payment from nodes[0] and run the commitment dance up to, but not including,
	// nodes[1]'s handling of the final revoke_and_ack.
	let (payment_preimage_1, our_payment_hash, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, our_payment_hash, &Some(payment_secret_1)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

	// Fail the monitor update triggered by the RAA: nodes[1] must generate no messages or
	// events until the update completes.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Completing the update produces no outbound messages (nothing needed rebroadcasting) —
	// only the pending-HTLC-forward processing that surfaces the payment.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentReceived { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}
789
#[test]
fn test_monitor_update_raa_while_paused() {
	// Tests handling of an RAA while monitor updating has already been marked failed.
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	send_payment(&nodes[0], &[&nodes[1]], 5000000);
	// Start one payment in each direction so that both peers have an update_add_htlc +
	// commitment_signed in flight simultaneously.
	let (payment_preimage_1, our_payment_hash_1, our_payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

	let (payment_preimage_2, our_payment_hash_2, our_payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
		let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[1].node.send_payment(&route, our_payment_hash_2, &Some(our_payment_secret_2)).unwrap();
		check_added_monitors!(nodes[1], 1);
	}
	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

	// Deliver nodes[0]'s add + commitment to nodes[1], which responds with an RAA.
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	// Fail nodes[0]'s monitor update while it processes nodes[1]'s add + commitment,
	// leaving nodes[0] paused with a monitor-update failure outstanding...
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[0], 1);

	// ...then deliver the RAA while still paused: nodes[0] must hold back its responses.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
	check_added_monitors!(nodes[0], 1);

	// Restore monitor updating; nodes[0] should now release its held RAA + commitment.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	// Finish the commitment dance in both directions.
	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
	check_added_monitors!(nodes[1], 1);
	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Both sides should now see the payment the other routed to them.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[0], 1);
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_received!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);

	// Clean up by claiming both payments.
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
}
868
869 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
870         // Tests handling of a monitor update failure when processing an incoming RAA
871         let chanmon_cfgs = create_chanmon_cfgs(3);
872         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
873         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
874         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
875         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
876         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
877         let logger = test_utils::TestLogger::new();
878
879         // Rebalance a bit so that we can send backwards from 2 to 1.
880         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
881
882         // Route a first payment that we'll fail backwards
883         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
884
885         // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
886         assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
887         expect_pending_htlcs_forwardable!(nodes[2]);
888         check_added_monitors!(nodes[2], 1);
889
890         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
891         assert!(updates.update_add_htlcs.is_empty());
892         assert!(updates.update_fulfill_htlcs.is_empty());
893         assert_eq!(updates.update_fail_htlcs.len(), 1);
894         assert!(updates.update_fail_malformed_htlcs.is_empty());
895         assert!(updates.update_fee.is_none());
896         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
897
898         let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
899         check_added_monitors!(nodes[0], 0);
900
901         // While the second channel is AwaitingRAA, forward a second payment to get it into the
902         // holding cell.
903         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[2]);
904         {
905                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
906                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
907                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
908                 check_added_monitors!(nodes[0], 1);
909         }
910
911         let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
912         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
913         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
914
915         expect_pending_htlcs_forwardable!(nodes[1]);
916         check_added_monitors!(nodes[1], 0);
917         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
918
919         // Now fail monitor updating.
920         *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
921         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
922         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
923         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
924         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
925         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
926         check_added_monitors!(nodes[1], 1);
927
928         // Forward a third payment which will also be added to the holding cell, despite the channel
929         // being paused waiting a monitor update.
930         let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]);
931         {
932                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
933                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
934                 nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
935                 check_added_monitors!(nodes[0], 1);
936         }
937
938         *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); // We succeed in updating the monitor for the first channel
939         send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
940         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
941         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
942         check_added_monitors!(nodes[1], 0);
943
944         // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
945         // and not forwarded.
946         expect_pending_htlcs_forwardable!(nodes[1]);
947         check_added_monitors!(nodes[1], 0);
948         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
949
950         let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
951                 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
952                 let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]);
953                 let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
954                 let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
955                 nodes[2].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap();
956                 check_added_monitors!(nodes[2], 1);
957
958                 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
959                 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
960                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
961                 check_added_monitors!(nodes[1], 1);
962                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
963                 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
964                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
965                 (Some(payment_preimage_4), Some(payment_hash_4))
966         } else { (None, None) };
967
968         // Restore monitor updating, ensuring we immediately get a fail-back update and a
969         // update_add update.
970         *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
971         let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
972         nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
973         check_added_monitors!(nodes[1], 0);
974         expect_pending_htlcs_forwardable!(nodes[1]);
975         check_added_monitors!(nodes[1], 1);
976
977         let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
978         if test_ignore_second_cs {
979                 assert_eq!(events_3.len(), 3);
980         } else {
981                 assert_eq!(events_3.len(), 2);
982         }
983
984         // Note that the ordering of the events for different nodes is non-prescriptive, though the
985         // ordering of the two events that both go to nodes[2] have to stay in the same order.
986         let messages_a = match events_3.pop().unwrap() {
987                 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
988                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
989                         assert!(updates.update_fulfill_htlcs.is_empty());
990                         assert_eq!(updates.update_fail_htlcs.len(), 1);
991                         assert!(updates.update_fail_malformed_htlcs.is_empty());
992                         assert!(updates.update_add_htlcs.is_empty());
993                         assert!(updates.update_fee.is_none());
994                         (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
995                 },
996                 _ => panic!("Unexpected event type!"),
997         };
998         let raa = if test_ignore_second_cs {
999                 match events_3.remove(1) {
1000                         MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
1001                                 assert_eq!(node_id, nodes[2].node.get_our_node_id());
1002                                 Some(msg.clone())
1003                         },
1004                         _ => panic!("Unexpected event"),
1005                 }
1006         } else { None };
1007         let send_event_b = SendEvent::from_event(events_3.remove(0));
1008         assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
1009
1010         // Now deliver the new messages...
1011
1012         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
1013         commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
1014         expect_payment_failed!(nodes[0], payment_hash_1, true);
1015
1016         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
1017         let as_cs;
1018         if test_ignore_second_cs {
1019                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1020                 check_added_monitors!(nodes[2], 1);
1021                 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1022                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
1023                 check_added_monitors!(nodes[2], 1);
1024                 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1025                 assert!(bs_cs.update_add_htlcs.is_empty());
1026                 assert!(bs_cs.update_fail_htlcs.is_empty());
1027                 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
1028                 assert!(bs_cs.update_fulfill_htlcs.is_empty());
1029                 assert!(bs_cs.update_fee.is_none());
1030
1031                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1032                 check_added_monitors!(nodes[1], 1);
1033                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1034
1035                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1036                 check_added_monitors!(nodes[1], 1);
1037         } else {
1038                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1039                 check_added_monitors!(nodes[2], 1);
1040
1041                 let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
1042                 assert_eq!(bs_revoke_and_commit.len(), 2);
1043                 match bs_revoke_and_commit[0] {
1044                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1045                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1046                                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
1047                                 check_added_monitors!(nodes[1], 1);
1048                         },
1049                         _ => panic!("Unexpected event"),
1050                 }
1051
1052                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1053
1054                 match bs_revoke_and_commit[1] {
1055                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1056                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1057                                 assert!(updates.update_add_htlcs.is_empty());
1058                                 assert!(updates.update_fail_htlcs.is_empty());
1059                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
1060                                 assert!(updates.update_fulfill_htlcs.is_empty());
1061                                 assert!(updates.update_fee.is_none());
1062                                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1063                                 check_added_monitors!(nodes[1], 1);
1064                         },
1065                         _ => panic!("Unexpected event"),
1066                 }
1067         }
1068
1069         assert_eq!(as_cs.update_add_htlcs.len(), 1);
1070         assert!(as_cs.update_fail_htlcs.is_empty());
1071         assert!(as_cs.update_fail_malformed_htlcs.is_empty());
1072         assert!(as_cs.update_fulfill_htlcs.is_empty());
1073         assert!(as_cs.update_fee.is_none());
1074         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1075
1076
1077         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
1078         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
1079         check_added_monitors!(nodes[2], 1);
1080         let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1081
1082         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1083         check_added_monitors!(nodes[2], 1);
1084         let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1085
1086         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
1087         check_added_monitors!(nodes[1], 1);
1088         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1089
1090         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1091         check_added_monitors!(nodes[1], 1);
1092         let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1093
1094         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1095         check_added_monitors!(nodes[2], 1);
1096         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1097
1098         expect_pending_htlcs_forwardable!(nodes[2]);
1099
1100         let events_6 = nodes[2].node.get_and_clear_pending_events();
1101         assert_eq!(events_6.len(), 2);
1102         match events_6[0] {
1103                 Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
1104                 _ => panic!("Unexpected event"),
1105         };
1106         match events_6[1] {
1107                 Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1108                 _ => panic!("Unexpected event"),
1109         };
1110
1111         if test_ignore_second_cs {
1112                 expect_pending_htlcs_forwardable!(nodes[1]);
1113                 check_added_monitors!(nodes[1], 1);
1114
1115                 send_event = SendEvent::from_node(&nodes[1]);
1116                 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
1117                 assert_eq!(send_event.msgs.len(), 1);
1118                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
1119                 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
1120
1121                 expect_pending_htlcs_forwardable!(nodes[0]);
1122
1123                 let events_9 = nodes[0].node.get_and_clear_pending_events();
1124                 assert_eq!(events_9.len(), 1);
1125                 match events_9[0] {
1126                         Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
1127                         _ => panic!("Unexpected event"),
1128                 };
1129                 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
1130         }
1131
1132         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
1133 }
1134
#[test]
fn test_monitor_update_fail_raa() {
	// Exercise the RAA monitor-update-failure scenario in both modes: with the
	// second commitment_signed delivered normally, and with it ignored.
	for ignore_second_cs in [false, true].iter() {
		do_test_monitor_update_fail_raa(*ignore_second_cs);
	}
}
1140
#[test]
fn test_monitor_update_fail_reestablish() {
	// Simple test for message retransmission after monitor update failure on
	// channel_reestablish generating a monitor update (which comes from freeing holding cell
	// HTLCs).
	//
	// Topology: nodes[0] <-> nodes[1] <-> nodes[2], with a payment routed 0 -> 1 -> 2.
	// nodes[2] claims while 0 and 1 are disconnected, so nodes[1]'s fulfill towards
	// nodes[0] sits in the holding cell until reconnection.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());

	let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Disconnect the 0 <-> 1 link before the claim so nodes[1] cannot immediately
	// forward the fulfill back to nodes[0].
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);

	// nodes[2] claims; the resulting update_fulfill + commitment dance with nodes[1]
	// completes normally on the still-connected 1 <-> 2 link.
	assert!(nodes[2].node.claim_funds(our_payment_preimage));
	check_added_monitors!(nodes[2], 1);
	let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	expect_payment_forwarded!(nodes[1], Some(1000), false);
	check_added_monitors!(nodes[1], 1);
	// nodes[1] can't send its own fulfill to nodes[0] yet - the peer is disconnected.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);

	// First reconnection attempt: arm a TemporaryFailure on nodes[1] so the monitor
	// update generated by freeing the holding-cell fulfill fails.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });

	let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
	assert_eq!(
		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

	// The reestablish-triggered holding-cell free hit the failed monitor update.
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Disconnect and reconnect a second time while the monitor update is still
	// outstanding; the reestablish messages should be retransmitted unchanged.
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });

	assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
	assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	assert_eq!(
		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

	// This time no new monitor update is attempted (the previous one is still pending).
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
	check_added_monitors!(nodes[1], 0);
	assert_eq!(
		get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

	// Restore monitor updating and signal completion of the pending update; nodes[1]
	// can now actually send the held fulfill to nodes[0].
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);

	// The original sender finally learns the payment succeeded.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
		_ => panic!("Unexpected event"),
	}
}
1230
#[test]
fn raa_no_response_awaiting_raa_state() {
	// This is a rather convoluted test which ensures that if handling of an RAA does not happen
	// due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
	// in question (assuming it intends to respond with a CS after monitor updating is restored).
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
	let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);

	// Queue up two payments - one will be delivered right away, one immediately goes into the
	// holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
	// immediately after a CS. By setting failing the monitor update failure from the CS (which
	// requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
	// generation during RAA while in monitor-update-failed state.
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
		check_added_monitors!(nodes[0], 1);
		// The second send adds no monitor - it went into the holding cell.
		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
		check_added_monitors!(nodes[0], 0);
	}

	// Deliver the first payment's add + CS to nodes[1] normally.
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);

	// nodes[1]'s RAA frees nodes[0]'s holding cell, producing the second payment's add.
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
	// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
	// then restore channel monitor updates.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	// The CS hit the failed monitor update - no response is generated.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Delivering the RAA while the monitor update is still pending must also produce no
	// immediate response (but must internally mark AwaitingRemoteRevoke - the point of the test).
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Un-fail the monitor; nodes[1] now emits the deferred RAA + CS pair.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	// nodes[1] should be AwaitingRAA here!
	check_added_monitors!(nodes[1], 0);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);

	// We send a third payment here, which is somewhat of a redundant test, but the
	// chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
	// commitment transaction states) whereas here we can explicitly check for it.
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
		// nodes[0] is AwaitingRAA again, so this goes to the holding cell - no messages yet.
		check_added_monitors!(nodes[0], 0);
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}
	// nodes[1]'s RAA frees the third payment from nodes[0]'s holding cell.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	// Finally deliver the RAA to nodes[1] which results in a CS response to the last update
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	// Wind down the remaining commitment dance for the third payment.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 1000000);

	// All three payments made it through; claim them to leave the channel clean.
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
}
1352
#[test]
fn claim_while_disconnected_monitor_update_fail() {
	// Test for claiming a payment while disconnected and then having the resulting
	// channel-update-generated monitor update fail. This kind of thing isn't a particularly
	// contrived case for nodes with network instability.
	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
	// code introduced a regression in this test (specifically, this caught a removal of the
	// channel_reestablish handling ensuring the order was sensical given the messages used).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	// Forward a payment for B to claim
	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	// Claim while disconnected - the fulfill goes into nodes[1]'s holding cell.
	assert!(nodes[1].node.claim_funds(payment_preimage_1));
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });

	let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());

	// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
	// update.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);
	// No fulfill/commitment messages go out while the update is pending.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Send a second payment from A to B, resulting in a commitment update that gets swallowed with
	// the monitor still failed
	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
	// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
	// until we've channel_monitor_update'd and updated for the new commitment transaction.

	// Now un-fail the monitor, which will result in B sending its original commitment update,
	// receiving the commitment update from A, and the resulting commitment dances.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	// Two messages queued: the deferred fulfill+CS (for payment 1) and the RAA owed for A's CS.
	let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_msgs.len(), 2);

	match bs_msgs[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			check_added_monitors!(nodes[0], 1);

			let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
			check_added_monitors!(nodes[1], 1);
		},
		_ => panic!("Unexpected event"),
	}

	match bs_msgs[1] {
		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}

	// Finish both pending commitment dances (A's for payment 2, B's for payment 1).
	let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	// The second payment is now receivable by B.
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);

	// A sees the first payment's claim complete.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
			assert_eq!(*payment_preimage, payment_preimage_1);
			assert_eq!(*payment_hash, payment_hash_1);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
1479
#[test]
fn monitor_failed_no_reestablish_response() {
	// Test for receiving a channel_reestablish after a monitor update failure resulted in no
	// response to a commitment_signed.
	// Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
	// debug_assert!() failure in channel_reestablish handling.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
	// on receipt).
	let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	// Make B's next monitor update fail so that handling the commitment_signed below leaves B
	// frozen with no response messages generated.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	// B's monitor update failed, so it must not have sent any RAA/CS response at all.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
	// is still failing to update monitors.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });

	// Exchange channel_reestablish; each side only replies with a channel_update since B is still
	// monitor-update-frozen and A has nothing outstanding to retransmit.
	let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());

	// Restore monitor updating and complete the pending update; B should now produce the
	// revoke_and_ack + commitment_signed response it previously withheld.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	// With the commitment dance complete, B receives the HTLC and the payment can be claimed.
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}
1549
#[test]
fn first_message_on_recv_ordering() {
	// Test that if the initial generator of a monitor-update-frozen state doesn't generate
	// messages, we're willing to flip the order of response messages if necessary in response to
	// a commitment_signed which needs to send an RAA first.
	// At a high level, our goal is to fail monitor updating in response to an RAA which needs no
	// response and then handle a CS while in the failed state, requiring an RAA followed by a CS
	// response. To do this, we start routing two payments, with the final RAA for the first being
	// delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
	// have no pending response but will want to send a RAA/CS (with the updates for the second
	// payment applied).
	// Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	// Route the first payment outbound, holding the last RAA for B until we are set up so that we
	// can deliver it and fail the monitor update.
	let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	// Hold A's final RAA for the first payment; B is now in AwaitingRAA.
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Route the second payment, generating an update_add_htlc/commitment_signed
	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	// Make B's next monitor update fail before delivering the held RAA.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));

	// Deliver the final RAA for the first payment, which does not require a response. RAAs
	// generally require a commitment_signed, so the fact that we're expecting an opposite response
	// to the next message also tests resetting the delivery order.
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
	// RAA/CS response, which should be generated when we call channel_monitor_update (with the
	// appropriate HTLC acceptance).
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);

	// Restore monitor updating; completing the pending update releases the first payment's HTLC
	// and B's RAA-then-CS response for the second payment.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);

	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
1651
#[test]
fn test_monitor_update_fail_claim() {
	// Basic test for monitor update failures when processing claim_funds calls.
	// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
	// update to claim the payment. We then send two payments C->B->A, which are held at B.
	// Finally, we restore the channel monitor updating and claim the payment on B, forwarding
	// the payments from C onwards to A.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
	let logger = test_utils::TestLogger::new();

	// Rebalance a bit so that we can send backwards from 3 to 2.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// Fail B's monitor update on the claim; the claim itself is treated as a success.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	assert!(nodes[1].node.claim_funds(payment_preimage_1));
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Note that at this point there is a pending commitment transaction update for A being held by
	// B. Even when we go to send the payment from C through B to A, B will not update this
	// already-signed commitment transaction and will instead wait for it to resolve before
	// forwarding the payment onwards.

	let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
	let route;
	{
		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
		route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
		check_added_monitors!(nodes[2], 1);
	}

	// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
	// paused, so forward shouldn't succeed until we call channel_monitor_updated().
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	// B accepts the HTLC but generates no forward yet; the 0<->1 channel is still paused.
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);

	// Send a second C->B->A payment; it is likewise held at B.
	let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
	nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
	check_added_monitors!(nodes[2], 1);

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);

	// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
	expect_payment_sent!(nodes[0], payment_preimage_1);

	// Get the payment forwards, note that they were batched into one commitment update.
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 1);
	let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
	expect_pending_htlcs_forwardable!(nodes[0]);

	// A receives both held payments, in the order they were sent.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
			assert_eq!(payment_hash_2, *payment_hash);
			assert_eq!(1_000_000, amt);
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
			assert_eq!(payment_hash_3, *payment_hash);
			assert_eq!(1_000_000, amt);
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_3, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
}
1765
#[test]
fn test_monitor_update_on_pending_forwards() {
	// Basic test for monitor update failures when processing pending HTLC fail/add forwards.
	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
	// from C to A will be pending a forward to A.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
	let logger = test_utils::TestLogger::new();

	// Rebalance a bit so that we can send backwards from 3 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	// Route A -> B -> C and have C fail the payment, leaving B with a pending back-fail to A.
	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
	expect_pending_htlcs_forwardable!(nodes[2]);
	check_added_monitors!(nodes[2], 1);

	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Route C -> B -> A, leaving B with a pending forward to A as well.
	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
		let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
		check_added_monitors!(nodes[2], 1);
	}

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);

	// Fail B's monitor update while it processes the pending fail + forward; B generates nothing.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);

	// Restore monitor updating; B's fail and add for A are released in one commitment update.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);

	// A sees the back-fail of payment 1 and the pending forward of payment 2.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	if let Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } = events[0] {
		assert_eq!(payment_hash, payment_hash_1);
		assert!(rejected_by_dest);
	} else { panic!("Unexpected event!"); }
	match events[1] {
		Event::PendingHTLCsForwardable { .. } => { },
		_ => panic!("Unexpected event"),
	};
	nodes[0].node.process_pending_htlc_forwards();
	expect_payment_received!(nodes[0], payment_hash_2, payment_secret_2, 1000000);

	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
}
1838
#[test]
fn monitor_update_claim_fail_no_response() {
	// Test for claim_funds resulting in both a monitor update failure and no message response (due
	// to channel being AwaitingRAA).
	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
	// code was broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = test_utils::TestLogger::new();

	// Forward a payment for B to claim
	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	// Run the commitment dance but hold A's final RAA (returned by the macro), leaving B in
	// AwaitingRAA.
	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);

	// Claim the first payment with a failing monitor update; since B is AwaitingRAA it would have
	// generated no messages even on success, so we expect silence either way.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	assert!(nodes[1].node.claim_funds(payment_preimage_1));
	check_added_monitors!(nodes[1], 1);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);

	// Restore monitor updating; still no messages until B receives the held RAA.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Deliver the held RAA; B receives the second payment and releases the fulfill for the first.
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);

	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
			assert_eq!(*payment_preimage, payment_preimage_1);
			assert_eq!(*payment_hash, payment_hash_1);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
1904
1905 // confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
1906 // restore_b_before_conf has no meaning if !confirm_a_first
1907 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool) {
1908         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1909         // the channel setup happily after the update is restored.
1910         let chanmon_cfgs = create_chanmon_cfgs(2);
1911         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1912         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1913         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1914
1915         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1916         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1917         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1918
1919         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
1920
1921         nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_tx.clone()).unwrap();
1922         check_added_monitors!(nodes[0], 0);
1923
1924         *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
1925         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1926         let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1927         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1928         check_added_monitors!(nodes[1], 1);
1929
1930         *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
1931         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1932         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1933         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1934         check_added_monitors!(nodes[0], 1);
1935         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1936         *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
1937         let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1938         nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1939         check_added_monitors!(nodes[0], 0);
1940
1941         let events = nodes[0].node.get_and_clear_pending_events();
1942         assert_eq!(events.len(), 0);
1943         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1944         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1945
1946         if confirm_a_first {
1947                 confirm_transaction(&nodes[0], &funding_tx);
1948                 nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
1949         } else {
1950                 assert!(!restore_b_before_conf);
1951                 confirm_transaction(&nodes[1], &funding_tx);
1952                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1953         }
1954
1955         // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
1956         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1957         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1958         reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1959         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1960         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1961
1962         if !restore_b_before_conf {
1963                 confirm_transaction(&nodes[1], &funding_tx);
1964                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1965                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1966         }
1967
1968         *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
1969         let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1970         nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1971         check_added_monitors!(nodes[1], 0);
1972
1973         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1974                 nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
1975
1976                 confirm_transaction(&nodes[0], &funding_tx);
1977                 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1978                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
1979         } else {
1980                 if restore_b_before_conf {
1981                         confirm_transaction(&nodes[1], &funding_tx);
1982                 }
1983                 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1984                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
1985         };
1986         for node in nodes.iter() {
1987                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
1988                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
1989                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
1990         }
1991
1992         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1993         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1994         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1995         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1996 }
1997
#[test]
fn during_funding_monitor_fail() {
	// Exercise the supported (confirm_a_first, restore_b_before_conf) combinations.
	// (false, true) is intentionally omitted: the helper asserts that combination
	// is invalid (B cannot be restored before confirmation if B confirms first).
	for &(confirm_a_first, restore_b_before_conf) in &[(true, true), (true, false), (false, false)] {
		do_during_funding_monitor_fail(confirm_a_first, restore_b_before_conf);
	}
}
2004
#[test]
fn test_path_paused_mpp() {
	// Simple test of sending a multi-part payment where one path is currently blocked awaiting
	// monitor update
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Build a diamond topology so two disjoint paths from node 0 to node 3 exist:
	// 0 -> 1 -> 3 (chan_1, chan_3) and 0 -> 2 -> 3 (chan_2, chan_4).
	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
	let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
	let logger = test_utils::TestLogger::new();

	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
	let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();

	// Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
	// duplicate the single returned path and rewrite each copy's hops manually.
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_id;
	route.paths[0][1].short_channel_id = chan_3_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
	route.paths[1][1].short_channel_id = chan_4_id;

	// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
	// (for the path 0 -> 2 -> 3) fails.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
	*nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));

	// Now check that we get the right return value, indicating that the first path succeeded but
	// the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
	// some paths succeeded, preventing retry.
	if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) {
		assert_eq!(results.len(), 2);
		if let Ok(()) = results[0] {} else { panic!(); }
		if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
	} else { panic!(); }
	// Both paths attempted a monitor update (one succeeded, one failed).
	check_added_monitors!(nodes[0], 2);
	// Clear the injected failure so the stuck path's monitor update can complete below.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));

	// Pass the first HTLC of the payment along to nodes[3].
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);

	// And check that, after we successfully update the monitor for chan_2 we can pass the second
	// HTLC along to nodes[3] and claim the whole payment back to nodes[0].
	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);

	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}
2064
#[test]
fn test_pending_update_fee_ack_on_reconnect() {
	// In early versions of our automated fee update patch, nodes did not correctly use the
	// previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
	// undelivered commitment_signed.
	//
	// B sends A new HTLC + CS, not delivered
	// A sends B update_fee + CS
	// B receives the CS and sends RAA, previously causing B to lock in the new feerate
	// reconnect
	// B resends initial CS, using the original fee

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	// Give node B some outbound liquidity so it can send a payment back to A.
	send_payment(&nodes[0], &[&nodes[1]], 100_000_00);

	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
	let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph,
		&nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
	nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
	check_added_monitors!(nodes[1], 1);
	let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	// bs_initial_send_msgs are not delivered until they are re-generated after reconnect

	// Double A's feerate estimate so its timer tick produces an update_fee.
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock *= 2;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update_fee_msgs.update_fee.is_some());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// bs_first_raa is not delivered until it is re-generated after reconnect

	// Disconnect and reconnect the peers, exchanging channel_reestablish messages.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
	let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
	let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	// B must resend its undelivered CS and RAA, and the resends must exactly match the
	// originals — i.e. the initial CS must still use the original (pre-update) feerate.
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
	let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_resend_msgs.len(), 3);
	if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
		assert_eq!(*updates, bs_initial_send_msgs);
	} else { panic!(); }
	if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
		assert_eq!(*msg, bs_first_raa);
	} else { panic!(); }
	if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
	get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());

	// Now deliver B's original HTLC + CS and walk both commitment-update state machines
	// (the HTLC add and the pending fee update) to completion.
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
	check_added_monitors!(nodes[1], 1);
	let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
	check_added_monitors!(nodes[1], 1);

	// A should now receive B's payment, proving the HTLC survived the reconnect + fee dance.
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);

	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
}
2156
// Tests that an undelivered update_fee is correctly re-sent after a reconnect.
//
// `deliver_update`: whether the first update_fee message (but not its commitment_signed) is
// delivered to node B before the disconnect.
// `parallel_updates`: whether a second feerate bump is queued on node A while the first
// update_fee is still un-acked (it must be held back, then sent after the first completes).
fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
	// In early versions we did not handle resending of update_fee on reconnect correctly. The
	// chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
	// explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	send_payment(&nodes[0], &[&nodes[1]], 1000);

	// Bump A's feerate estimate so its timer tick generates an update_fee.
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(update_msgs.update_fee.is_some());
	if deliver_update {
		nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
	}

	if parallel_updates {
		// Queue a second fee bump while the first is still un-acked; it must not generate
		// any messages yet (it is held until the first update completes).
		{
			let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
			*feerate_lock += 20;
		}
		nodes[0].node.timer_tick_occurred();
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	// Disconnect and reconnect; A must regenerate the undelivered update_fee + CS.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
	let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
	let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	// B has nothing to re-send beyond its channel_update.
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
	get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
	let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_reconnect_msgs.len(), 2);
	if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
	// Note: this shadows the earlier `update_msgs` with the re-sent copy.
	let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
		{ updates } else { panic!(); };
	assert!(update_msgs.update_fee.is_some());
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
	if parallel_updates {
		// Complete the first fee update; the held second update_fee is then released and
		// run through a full commitment dance of its own.
		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
		check_added_monitors!(nodes[1], 1);
		let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
		check_added_monitors!(nodes[0], 1);
		// The RAA frees the held second update_fee.
		let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
		check_added_monitors!(nodes[0], 1);
		let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

		nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
		check_added_monitors!(nodes[1], 1);
		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
		let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		check_added_monitors!(nodes[1], 1);

		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
		check_added_monitors!(nodes[0], 1);

		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
		check_added_monitors!(nodes[1], 1);
	} else {
		commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
	}

	// Channel should still be fully usable at the new feerate.
	send_payment(&nodes[0], &[&nodes[1]], 1000);
}
#[test]
fn update_fee_resend_test() {
	// Run the update_fee resend scenario over every (deliver_update, parallel_updates)
	// combination, in the same order as before: (f,f), (t,f), (f,t), (t,t).
	for &parallel_updates in &[false, true] {
		for &deliver_update in &[false, true] {
			do_update_fee_resend_test(deliver_update, parallel_updates);
		}
	}
}
2253
2254 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2255         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2256         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2257         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2258         // which failed in such a case).
2259         let chanmon_cfgs = create_chanmon_cfgs(2);
2260         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2261         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2262         let persister: test_utils::TestPersister;
2263         let new_chain_monitor: test_utils::TestChainMonitor;
2264         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
2265         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2266
2267         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2;
2268         let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]);
2269         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2270
2271         // Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed
2272         // set but AwaitingRemoteRevoke unset. When this test was written, any attempts to send an HTLC
2273         // while MonitorUpdateFailed is set are immediately failed-backwards. Thus, the only way to get
2274         // an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but
2275         // MonitorUpdateFailed is unset, and then swap the flags.
2276         //
2277         // We do this by:
2278         //  a) routing a payment from node B to node A,
2279         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2280         //     putting node A in AwaitingRemoteRevoke,
2281         //  c) sending a second payment from node A to node B, which is immediately placed in the
2282         //     holding cell,
2283         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2284         //     when we try to persist the payment preimage,
2285         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2286         //     clearing AwaitingRemoteRevoke on node A.
2287         //
2288         // Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
2289         // will not be freed from the holding cell.
2290         let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000);
2291
2292         let route = {
2293                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
2294                 get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
2295         };
2296
2297         nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
2298         check_added_monitors!(nodes[0], 1);
2299         let send = SendEvent::from_node(&nodes[0]);
2300         assert_eq!(send.msgs.len(), 1);
2301
2302         nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
2303         check_added_monitors!(nodes[0], 0);
2304
2305         *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
2306         assert!(nodes[0].node.claim_funds(payment_preimage_0));
2307         check_added_monitors!(nodes[0], 1);
2308
2309         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2310         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2311         check_added_monitors!(nodes[1], 1);
2312
2313         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2314
2315         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2316         check_added_monitors!(nodes[0], 1);
2317
2318         if disconnect {
2319                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2320                 // disconnect the peers. Note that the fuzzer originally found this issue because
2321                 // deserializing a ChannelManager in this state causes an assertion failure.
2322                 if reload_a {
2323                         let nodes_0_serialized = nodes[0].node.encode();
2324                         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
2325                         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
2326
2327                         persister = test_utils::TestPersister::new();
2328                         let keys_manager = &chanmon_cfgs[0].keys_manager;
2329                         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
2330                         nodes[0].chain_monitor = &new_chain_monitor;
2331                         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
2332                         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
2333                                 &mut chan_0_monitor_read, keys_manager).unwrap();
2334                         assert!(chan_0_monitor_read.is_empty());
2335
2336                         let mut nodes_0_read = &nodes_0_serialized[..];
2337                         let config = UserConfig::default();
2338                         nodes_0_deserialized = {
2339                                 let mut channel_monitors = HashMap::new();
2340                                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
2341                                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
2342                                         default_config: config,
2343                                         keys_manager,
2344                                         fee_estimator: node_cfgs[0].fee_estimator,
2345                                         chain_monitor: nodes[0].chain_monitor,
2346                                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
2347                                         logger: nodes[0].logger,
2348                                         channel_monitors,
2349                                 }).unwrap().1
2350                         };
2351                         nodes[0].node = &nodes_0_deserialized;
2352                         assert!(nodes_0_read.is_empty());
2353
2354                         nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
2355                         check_added_monitors!(nodes[0], 1);
2356                 } else {
2357                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
2358                 }
2359                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
2360
2361                 // Now reconnect the two
2362                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
2363                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2364                 assert_eq!(reestablish_1.len(), 1);
2365                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
2366                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2367                 assert_eq!(reestablish_2.len(), 1);
2368
2369                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2370                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2371                 check_added_monitors!(nodes[1], 0);
2372
2373                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2374                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2375
2376                 assert!(resp_0.0.is_none());
2377                 assert!(resp_0.1.is_none());
2378                 assert!(resp_0.2.is_none());
2379                 assert!(resp_1.0.is_none());
2380                 assert!(resp_1.1.is_none());
2381
2382                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2383                 // moment).
2384                 if let Some(pending_cs) = resp_1.2 {
2385                         assert!(pending_cs.update_add_htlcs.is_empty());
2386                         assert!(pending_cs.update_fail_htlcs.is_empty());
2387                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2388                         assert_eq!(pending_cs.commitment_signed, cs);
2389                 } else { panic!(); }
2390
2391                 // There should be no monitor updates as we are still pending awaiting a failed one.
2392                 check_added_monitors!(nodes[0], 0);
2393                 check_added_monitors!(nodes[1], 0);
2394         }
2395
2396         // If we finish updating the monitor, we should free the holding cell right away (this did
2397         // not occur prior to #756).
2398         *nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
2399         let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2400         nodes[0].node.channel_monitor_updated(&funding_txo, mon_id);
2401
2402         // New outbound messages should be generated immediately upon a call to
2403         // get_and_clear_pending_msg_events (but not before).
2404         check_added_monitors!(nodes[0], 0);
2405         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2406         check_added_monitors!(nodes[0], 1);
2407         assert_eq!(events.len(), 1);
2408
2409         // Deliver the pending in-flight CS
2410         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2411         check_added_monitors!(nodes[0], 1);
2412
2413         let commitment_msg = match events.pop().unwrap() {
2414                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2415                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2416                         assert!(updates.update_fail_htlcs.is_empty());
2417                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2418                         assert!(updates.update_fee.is_none());
2419                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2420                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2421                         expect_payment_sent!(nodes[1], payment_preimage_0);
2422                         assert_eq!(updates.update_add_htlcs.len(), 1);
2423                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2424                         updates.commitment_signed
2425                 },
2426                 _ => panic!("Unexpected event type!"),
2427         };
2428
2429         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2430         check_added_monitors!(nodes[1], 1);
2431
2432         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2433         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2434         expect_pending_htlcs_forwardable!(nodes[1]);
2435         expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2436         check_added_monitors!(nodes[1], 1);
2437
2438         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
2439
2440         expect_pending_htlcs_forwardable!(nodes[1]);
2441         expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2442
2443         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2444         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2445 }
#[test]
fn channel_holding_cell_serialize() {
	// Exercise each meaningful argument combination. Per the original comment, the second
	// argument is irrelevant once the first is `false`, so one `false` case suffices.
	for &(first_arg, second_arg) in &[(true, true), (true, false), (false, true)] {
		do_channel_holding_cell_serialize(first_arg, second_arg);
	}
}
2452
/// The state of the upstream (nodes[1] -> nodes[0]) HTLC claim at the moment nodes[1] and
/// nodes[2] disconnect in `do_test_reconnect_dup_htlc_claims`.
#[derive(PartialEq)]
enum HTLCStatusAtDupClaim {
	// nodes[1] has forwarded the update_fulfill_htlc to nodes[0], but the commitment_signed
	// dance for it has not completed.
	Received,
	// The claim is stuck in nodes[1]'s holding cell, as nodes[1] is still awaiting a
	// revoke_and_ack from nodes[0]; nothing has been sent upstream yet.
	HoldingCell,
	// The claim was forwarded to nodes[0] and the full commitment_signed dance completed.
	Cleared,
}
fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
	// When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
	// along the payment path before waiting for a full commitment_signed dance. This is great, but
	// can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
	// reconnects, and then has to re-send its update_fulfill_htlc message again.
	// In previous code, we didn't handle the double-claim correctly, spuriously closing the
	// channel on which the inbound HTLC was received.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Two channels 0 <-> 1 <-> 2; the HTLC of interest flows 0 -> 1 -> 2.
	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;

	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);

	let mut as_raa = None;
	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
		// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
		// awaiting a remote revoke_and_ack from nodes[0].
		let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph,
			&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
		nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
		check_added_monitors!(nodes[1], 1);

		// Deliver everything except nodes[0]'s final revoke_and_ack, which we hold on to below
		// so nodes[1] keeps new updates in its holding cell.
		let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
		check_added_monitors!(nodes[0], 1);
		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
		check_added_monitors!(nodes[0], 1);

		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
	}

	// Hand-construct the update_fulfill_htlc we will deliver to nodes[1], so we can deliver it
	// whether nodes[2] claims (below) or fails the HTLC.
	let fulfill_msg = msgs::UpdateFulfillHTLC {
		channel_id: chan_2,
		htlc_id: 0,
		payment_preimage,
	};
	if second_fails {
		assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
		expect_pending_htlcs_forwardable!(nodes[2]);
		check_added_monitors!(nodes[2], 1);
		get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	} else {
		assert!(nodes[2].node.claim_funds(payment_preimage));
		check_added_monitors!(nodes[2], 1);
		let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
		assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
		// Check that the message we're about to deliver matches the one generated:
		assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
	}
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
	expect_payment_forwarded!(nodes[1], Some(1000), false);
	check_added_monitors!(nodes[1], 1);

	let mut bs_updates = None;
	if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
		// nodes[1] immediately forwards the claim upstream to nodes[0]...
		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
		expect_payment_sent!(nodes[0], payment_preimage);
		if htlc_status == HTLCStatusAtDupClaim::Cleared {
			// ...and for Cleared, the commitment dance also completes before the disconnect.
			commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
		}
	} else {
		// With the claim stuck in the holding cell, nothing should go out yet.
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	}

	// Disconnect/reconnect nodes[1] <-> nodes[2]: nodes[2] re-sends its fulfill (or failure),
	// so nodes[1] sees a duplicate claim and must handle it without closing the channel.
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);

	if second_fails {
		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
		expect_pending_htlcs_forwardable!(nodes[1]);
	} else {
		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
	}

	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
		// Free the holding cell by finally delivering nodes[0]'s revoke_and_ack; the fulfill
		// then flows upstream to nodes[0].
		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
		check_added_monitors!(nodes[1], 1);
		expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it

		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
		expect_payment_sent!(nodes[0], payment_preimage);
	}
	if htlc_status != HTLCStatusAtDupClaim::Cleared {
		commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
	}
}
2559
#[test]
fn test_reconnect_dup_htlc_claims() {
	// Cover every HTLC state at the time of the duplicate claim, both when the final node
	// re-claims the HTLC after reconnecting and when it fails it instead.
	for &second_fails in &[false, true] {
		do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, second_fails);
		do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, second_fails);
		do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, second_fails);
	}
}
2569
#[test]
fn test_temporary_error_during_shutdown() {
	// Test that temporary failures when updating the monitor's shutdown script delay cooperative
	// close.
	let mut config = test_default_channel_config();
	config.channel_options.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());

	// Both nodes' next monitor update will fail with a temporary error.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));

	nodes[0].node.close_channel(&channel_id).unwrap();
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
	check_added_monitors!(nodes[0], 1);

	// With its monitor update still pending, nodes[0] must not yet send a closing_signed.
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	*nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = None;

	// Complete nodes[0]'s monitor update, releasing its closing_signed. nodes[1] handles it
	// but stays quiet, as its own monitor update is still pending.
	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));

	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Completing nodes[1]'s monitor update lets the closing_signed exchange run to completion.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = None;
	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);

	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
	let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
	let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
	let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
	assert!(none_b.is_none());
	let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	// Both sides must broadcast the same single cooperative-close transaction, spending the
	// funding output, and report a cooperative closure.
	assert_eq!(txn_a, txn_b);
	assert_eq!(txn_a.len(), 1);
	check_spends!(txn_a[0], funding_tx);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
}
2624
#[test]
fn test_permanent_error_during_sending_shutdown() {
	// A permanent monitor-update failure while persisting the shutdown script must result in a
	// force-close when we are the node initiating the cooperative close.
	let mut config = test_default_channel_config();
	config.channel_options.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;

	// Arrange for nodes[0]'s next monitor update to fail permanently, then start the close.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
	assert!(nodes[0].node.close_channel(&chan_id).is_ok());

	// Rather than proceeding cooperatively, nodes[0] must force-close on the spot.
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 2);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
2645
#[test]
fn test_permanent_error_during_handling_shutdown() {
	// A permanent monitor-update failure while persisting the shutdown script must result in a
	// force-close when we are the node handling a counterparty-initiated cooperative close.
	let mut config = test_default_channel_config();
	config.channel_options.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;

	// Arrange for nodes[1]'s next monitor update to fail permanently.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));

	// nodes[0] initiates shutdown; handling its message trips the failure on nodes[1].
	assert!(nodes[0].node.close_channel(&chan_id).is_ok());
	let shutdown_msg = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown_msg);

	// Rather than proceeding cooperatively, nodes[1] must force-close on the spot.
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 2);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
2668
#[test]
fn double_temp_error() {
	// Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());

	// Two pending inbound payments on nodes[1], claimed back-to-back below.
	let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
	let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// First claim fails its monitor update temporarily...
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	// `claim_funds` results in a ChannelMonitorUpdate.
	assert!(nodes[1].node.claim_funds(payment_preimage_1));
	check_added_monitors!(nodes[1], 1);
	let (funding_tx, latest_update_1) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();

	// ...and so does the second claim, while the first failure is still outstanding.
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
	// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
	// which had some asserts that prevented it from being called twice.
	assert!(nodes[1].node.claim_funds(payment_preimage_2));
	check_added_monitors!(nodes[1], 1);
	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));

	// Complete the two failed updates in order; only after the second completes should any
	// messages be generated.
	let (_, latest_update_2) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_2);

	// Complete the first HTLC.
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let (update_fulfill_1, commitment_signed_b1, node_id) = {
		match &events[0] {
			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				(update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
			},
			_ => panic!("Unexpected event"),
		}
	};
	assert_eq!(node_id, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
	check_added_monitors!(nodes[0], 0);
	expect_payment_sent!(nodes[0], payment_preimage_1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.process_pending_htlc_forwards();
	let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
	check_added_monitors!(nodes[1], 1);

	// Complete the second HTLC. nodes[1] emits both the second fulfill (freed from the holding
	// cell) and its revoke_and_ack for the dance above.
	let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 2);
		(match &events[0] {
			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				assert!(updates.update_add_htlcs.is_empty());
				assert!(updates.update_fail_htlcs.is_empty());
				assert!(updates.update_fail_malformed_htlcs.is_empty());
				assert!(updates.update_fee.is_none());
				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
				(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
			},
			_ => panic!("Unexpected event"),
		},
		 match events[1] {
			 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				 (*msg).clone()
			 },
			 _ => panic!("Unexpected event"),
		 })
	};
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
	expect_payment_sent!(nodes[0], payment_preimage_2);
}