cae75734703e75283d26df6c3346e0d7902a4a43
[rust-lightning] / lightning / src / ln / chanmon_update_fail_tests.rs
1 //! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
2 //! monitor updates.
3 //! There are a bunch of these as their handling is relatively error-prone so they are split out
4 //! here. See also the chanmon_fail_consistency fuzz test.
5
6 use chain::transaction::OutPoint;
7 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
8 use ln::channelmonitor::ChannelMonitorUpdateErr;
9 use ln::features::InitFeatures;
10 use ln::msgs;
11 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
12 use routing::router::get_route;
13 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
14 use util::errors::APIError;
15
16 use bitcoin::hashes::sha256::Hash as Sha256;
17 use bitcoin::hashes::Hash;
18
19 use ln::functional_test_utils::*;
20
21 use std::sync::Arc;
22 use util::test_utils;
23
24 #[test]
25 fn test_simple_monitor_permanent_update_fail() {
26         // Test that we handle a simple permanent monitor update failure
27         let chanmon_cfgs = create_chanmon_cfgs(2);
28         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
29         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
30         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
31         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
32         let logger = Arc::new(test_utils::TestLogger::new());
33
34         let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
35
36         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
37         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
38         let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
39         unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {});
40         check_added_monitors!(nodes[0], 2);
41
42         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
43         assert_eq!(events_1.len(), 2);
44         match events_1[0] {
45                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
46                 _ => panic!("Unexpected event"),
47         };
48         match events_1[1] {
49                 MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
50                 _ => panic!("Unexpected event"),
51         };
52
53         // TODO: Once we hit the chain with the failure transaction we should check that we get a
54         // PaymentFailed event
55
56         assert_eq!(nodes[0].node.list_channels().len(), 0);
57 }
58
59 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
60         // Test that we can recover from a simple temporary monitor update failure optionally with
61         // a disconnect in between
62         let chanmon_cfgs = create_chanmon_cfgs(2);
63         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
64         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
65         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
66         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
67         let logger = Arc::new(test_utils::TestLogger::new());
68
69         let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
70
71         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
72
73         {
74                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
75                 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
76                 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), false, APIError::MonitorUpdateFailed, {});
77                 check_added_monitors!(nodes[0], 1);
78         }
79
80         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
81         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
82         assert_eq!(nodes[0].node.list_channels().len(), 1);
83
84         if disconnect {
85                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
86                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
87                 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
88         }
89
90         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
91         let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
92         nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
93         check_added_monitors!(nodes[0], 0);
94
95         let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
96         assert_eq!(events_2.len(), 1);
97         let payment_event = SendEvent::from_event(events_2.pop().unwrap());
98         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
99         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
100         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
101
102         expect_pending_htlcs_forwardable!(nodes[1]);
103
104         let events_3 = nodes[1].node.get_and_clear_pending_events();
105         assert_eq!(events_3.len(), 1);
106         match events_3[0] {
107                 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
108                         assert_eq!(payment_hash_1, *payment_hash);
109                         assert_eq!(*payment_secret, None);
110                         assert_eq!(amt, 1000000);
111                 },
112                 _ => panic!("Unexpected event"),
113         }
114
115         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
116
117         // Now set it to failed again...
118         let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
119         {
120                 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
121                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
122                 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
123                 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
124                 check_added_monitors!(nodes[0], 1);
125         }
126
127         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
128         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
129         assert_eq!(nodes[0].node.list_channels().len(), 1);
130
131         if disconnect {
132                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
133                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
134                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
135         }
136
137         // ...and make sure we can force-close a frozen channel
138         nodes[0].node.force_close_channel(&channel_id);
139         check_added_monitors!(nodes[0], 1);
140         check_closed_broadcast!(nodes[0], false);
141
142         // TODO: Once we hit the chain with the failure transaction we should check that we get a
143         // PaymentFailed event
144
145         assert_eq!(nodes[0].node.list_channels().len(), 0);
146 }
147
148 #[test]
149 fn test_simple_monitor_temporary_update_fail() {
150         do_test_simple_monitor_temporary_update_fail(false);
151         do_test_simple_monitor_temporary_update_fail(true);
152 }
153
154 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
155         let disconnect_flags = 8 | 16;
156
157         // Test that we can recover from a temporary monitor update failure with some in-flight
158         // HTLCs going on at the same time potentially with some disconnection thrown in.
159         // * First we route a payment, then get a temporary monitor update failure when trying to
160         //   route a second payment. We then claim the first payment.
161         // * If disconnect_count is set, we will disconnect at this point (which is likely as
162         //   TemporaryFailure likely indicates net disconnect which resulted in failing to update
163         //   the ChannelMonitor on a watchtower).
164         // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
165         //   immediately, otherwise we wait disconnect and deliver them via the reconnect
166         //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
167         //   disconnect_count & !disconnect_flags is 0).
168         // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
169         //   through message sending, potentially disconnect/reconnecting multiple times based on
170         //   disconnect_count, to get the update_fulfill_htlc through.
171         // * We then walk through more message exchanges to get the original update_add_htlc
172         //   through, swapping message ordering based on disconnect_count & 8 and optionally
173         //   disconnect/reconnecting based on disconnect_count.
174         let chanmon_cfgs = create_chanmon_cfgs(2);
175         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
176         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
177         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
178         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
179         let logger = Arc::new(test_utils::TestLogger::new());
180
181         let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
182
183         // Now try to send a second payment which will fail to send
184         let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
185         {
186                 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
187                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
188                 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
189                 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
190                 check_added_monitors!(nodes[0], 1);
191         }
192
193         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
194         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
195         assert_eq!(nodes[0].node.list_channels().len(), 1);
196
197         // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
198         // but nodes[0] won't respond since it is frozen.
199         assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
200         check_added_monitors!(nodes[1], 1);
201         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
202         assert_eq!(events_2.len(), 1);
203         let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
204                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
205                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
206                         assert!(update_add_htlcs.is_empty());
207                         assert_eq!(update_fulfill_htlcs.len(), 1);
208                         assert!(update_fail_htlcs.is_empty());
209                         assert!(update_fail_malformed_htlcs.is_empty());
210                         assert!(update_fee.is_none());
211
212                         if (disconnect_count & 16) == 0 {
213                                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
214                                 let events_3 = nodes[0].node.get_and_clear_pending_events();
215                                 assert_eq!(events_3.len(), 1);
216                                 match events_3[0] {
217                                         Event::PaymentSent { ref payment_preimage } => {
218                                                 assert_eq!(*payment_preimage, payment_preimage_1);
219                                         },
220                                         _ => panic!("Unexpected event"),
221                                 }
222
223                                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
224                                 check_added_monitors!(nodes[0], 1);
225                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
226                                 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
227                         }
228
229                         (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
230                 },
231                 _ => panic!("Unexpected event"),
232         };
233
234         if disconnect_count & !disconnect_flags > 0 {
235                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
236                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
237         }
238
239         // Now fix monitor updating...
240         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
241         let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
242         nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
243         check_added_monitors!(nodes[0], 0);
244
245         macro_rules! disconnect_reconnect_peers { () => { {
246                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
247                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
248
249                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
250                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
251                 assert_eq!(reestablish_1.len(), 1);
252                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
253                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
254                 assert_eq!(reestablish_2.len(), 1);
255
256                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
257                 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
258                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
259                 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
260
261                 assert!(as_resp.0.is_none());
262                 assert!(bs_resp.0.is_none());
263
264                 (reestablish_1, reestablish_2, as_resp, bs_resp)
265         } } }
266
267         let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
268                 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
269                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
270
271                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
272                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
273                 assert_eq!(reestablish_1.len(), 1);
274                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
275                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
276                 assert_eq!(reestablish_2.len(), 1);
277
278                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
279                 check_added_monitors!(nodes[0], 0);
280                 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
281                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
282                 check_added_monitors!(nodes[1], 0);
283                 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
284
285                 assert!(as_resp.0.is_none());
286                 assert!(bs_resp.0.is_none());
287
288                 assert!(bs_resp.1.is_none());
289                 if (disconnect_count & 16) == 0 {
290                         assert!(bs_resp.2.is_none());
291
292                         assert!(as_resp.1.is_some());
293                         assert!(as_resp.2.is_some());
294                         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
295                 } else {
296                         assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
297                         assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
298                         assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
299                         assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
300                         assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
301                         assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
302
303                         assert!(as_resp.1.is_none());
304
305                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
306                         let events_3 = nodes[0].node.get_and_clear_pending_events();
307                         assert_eq!(events_3.len(), 1);
308                         match events_3[0] {
309                                 Event::PaymentSent { ref payment_preimage } => {
310                                         assert_eq!(*payment_preimage, payment_preimage_1);
311                                 },
312                                 _ => panic!("Unexpected event"),
313                         }
314
315                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
316                         let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
317                         // No commitment_signed so get_event_msg's assert(len == 1) passes
318                         check_added_monitors!(nodes[0], 1);
319
320                         as_resp.1 = Some(as_resp_raa);
321                         bs_resp.2 = None;
322                 }
323
324                 if disconnect_count & !disconnect_flags > 1 {
325                         let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
326
327                         if (disconnect_count & 16) == 0 {
328                                 assert!(reestablish_1 == second_reestablish_1);
329                                 assert!(reestablish_2 == second_reestablish_2);
330                         }
331                         assert!(as_resp == second_as_resp);
332                         assert!(bs_resp == second_bs_resp);
333                 }
334
335                 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
336         } else {
337                 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
338                 assert_eq!(events_4.len(), 2);
339                 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
340                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
341                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
342                                 msg.clone()
343                         },
344                         _ => panic!("Unexpected event"),
345                 })
346         };
347
348         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
349
350         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
351         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
352         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
353         // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
354         check_added_monitors!(nodes[1], 1);
355
356         if disconnect_count & !disconnect_flags > 2 {
357                 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
358
359                 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
360                 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
361
362                 assert!(as_resp.2.is_none());
363                 assert!(bs_resp.2.is_none());
364         }
365
366         let as_commitment_update;
367         let bs_second_commitment_update;
368
369         macro_rules! handle_bs_raa { () => {
370                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
371                 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
372                 assert!(as_commitment_update.update_add_htlcs.is_empty());
373                 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
374                 assert!(as_commitment_update.update_fail_htlcs.is_empty());
375                 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
376                 assert!(as_commitment_update.update_fee.is_none());
377                 check_added_monitors!(nodes[0], 1);
378         } }
379
380         macro_rules! handle_initial_raa { () => {
381                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
382                 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
383                 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
384                 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
385                 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
386                 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
387                 assert!(bs_second_commitment_update.update_fee.is_none());
388                 check_added_monitors!(nodes[1], 1);
389         } }
390
391         if (disconnect_count & 8) == 0 {
392                 handle_bs_raa!();
393
394                 if disconnect_count & !disconnect_flags > 3 {
395                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
396
397                         assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
398                         assert!(bs_resp.1.is_none());
399
400                         assert!(as_resp.2.unwrap() == as_commitment_update);
401                         assert!(bs_resp.2.is_none());
402
403                         assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
404                 }
405
406                 handle_initial_raa!();
407
408                 if disconnect_count & !disconnect_flags > 4 {
409                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
410
411                         assert!(as_resp.1.is_none());
412                         assert!(bs_resp.1.is_none());
413
414                         assert!(as_resp.2.unwrap() == as_commitment_update);
415                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
416                 }
417         } else {
418                 handle_initial_raa!();
419
420                 if disconnect_count & !disconnect_flags > 3 {
421                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
422
423                         assert!(as_resp.1.is_none());
424                         assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
425
426                         assert!(as_resp.2.is_none());
427                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
428
429                         assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
430                 }
431
432                 handle_bs_raa!();
433
434                 if disconnect_count & !disconnect_flags > 4 {
435                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
436
437                         assert!(as_resp.1.is_none());
438                         assert!(bs_resp.1.is_none());
439
440                         assert!(as_resp.2.unwrap() == as_commitment_update);
441                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
442                 }
443         }
444
445         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
446         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
447         // No commitment_signed so get_event_msg's assert(len == 1) passes
448         check_added_monitors!(nodes[0], 1);
449
450         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
451         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
452         // No commitment_signed so get_event_msg's assert(len == 1) passes
453         check_added_monitors!(nodes[1], 1);
454
455         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
456         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
457         check_added_monitors!(nodes[1], 1);
458
459         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
460         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
461         check_added_monitors!(nodes[0], 1);
462
463         expect_pending_htlcs_forwardable!(nodes[1]);
464
465         let events_5 = nodes[1].node.get_and_clear_pending_events();
466         assert_eq!(events_5.len(), 1);
467         match events_5[0] {
468                 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
469                         assert_eq!(payment_hash_2, *payment_hash);
470                         assert_eq!(*payment_secret, None);
471                         assert_eq!(amt, 1000000);
472                 },
473                 _ => panic!("Unexpected event"),
474         }
475
476         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
477 }
478
479 #[test]
480 fn test_monitor_temporary_update_fail_a() {
481         do_test_monitor_temporary_update_fail(0);
482         do_test_monitor_temporary_update_fail(1);
483         do_test_monitor_temporary_update_fail(2);
484         do_test_monitor_temporary_update_fail(3);
485         do_test_monitor_temporary_update_fail(4);
486         do_test_monitor_temporary_update_fail(5);
487 }
488
489 #[test]
490 fn test_monitor_temporary_update_fail_b() {
491         do_test_monitor_temporary_update_fail(2 | 8);
492         do_test_monitor_temporary_update_fail(3 | 8);
493         do_test_monitor_temporary_update_fail(4 | 8);
494         do_test_monitor_temporary_update_fail(5 | 8);
495 }
496
497 #[test]
498 fn test_monitor_temporary_update_fail_c() {
499         do_test_monitor_temporary_update_fail(1 | 16);
500         do_test_monitor_temporary_update_fail(2 | 16);
501         do_test_monitor_temporary_update_fail(3 | 16);
502         do_test_monitor_temporary_update_fail(2 | 8 | 16);
503         do_test_monitor_temporary_update_fail(3 | 8 | 16);
504 }
505
506 #[test]
507 fn test_monitor_update_fail_cs() {
508         // Tests handling of a monitor update failure when processing an incoming commitment_signed
509         let chanmon_cfgs = create_chanmon_cfgs(2);
510         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
511         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
512         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
513         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
514         let logger = Arc::new(test_utils::TestLogger::new());
515
516         let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
517         {
518                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
519                 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
520                 nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
521                 check_added_monitors!(nodes[0], 1);
522         }
523
524         let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
525         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
526
527         *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
528         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
529         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
530         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
531         check_added_monitors!(nodes[1], 1);
532         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
533
534         *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
535         let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
536         nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
537         check_added_monitors!(nodes[1], 0);
538         let responses = nodes[1].node.get_and_clear_pending_msg_events();
539         assert_eq!(responses.len(), 2);
540
541         match responses[0] {
542                 MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
543                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
544                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
545                         check_added_monitors!(nodes[0], 1);
546                 },
547                 _ => panic!("Unexpected event"),
548         }
549         match responses[1] {
550                 MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
551                         assert!(updates.update_add_htlcs.is_empty());
552                         assert!(updates.update_fulfill_htlcs.is_empty());
553                         assert!(updates.update_fail_htlcs.is_empty());
554                         assert!(updates.update_fail_malformed_htlcs.is_empty());
555                         assert!(updates.update_fee.is_none());
556                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
557
558                         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
559                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
560                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
561                         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
562                         check_added_monitors!(nodes[0], 1);
563                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
564                 },
565                 _ => panic!("Unexpected event"),
566         }
567
568         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
569         let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
570         nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
571         check_added_monitors!(nodes[0], 0);
572
573         let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
574         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
575         check_added_monitors!(nodes[1], 1);
576
577         expect_pending_htlcs_forwardable!(nodes[1]);
578
579         let events = nodes[1].node.get_and_clear_pending_events();
580         assert_eq!(events.len(), 1);
581         match events[0] {
582                 Event::PaymentReceived { payment_hash, payment_secret, amt } => {
583                         assert_eq!(payment_hash, our_payment_hash);
584                         assert_eq!(payment_secret, None);
585                         assert_eq!(amt, 1000000);
586                 },
587                 _ => panic!("Unexpected event"),
588         };
589
590         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
591 }
592
#[test]
fn test_monitor_update_fail_no_rebroadcast() {
	// Tests handling of a monitor update failure when no message rebroadcasting on
	// channel_monitor_updated() is required. Backported from chanmon_fail_consistency
	// fuzz tests.
	//
	// Flow: nodes[0] sends an HTLC to nodes[1]; nodes[1] completes the commitment
	// dance except for its handling of the final RAA, which hits a monitor update
	// failure. Since the RAA generates no outbound messages, restoring the monitor
	// should produce no rebroadcast — only the deferred PaymentReceived event.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
	{
		// Send a 1M-msat payment from nodes[0]; this adds one monitor update on the sender.
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	// Deliver the update_add_htlc, then run the commitment dance up to (but not
	// including) nodes[1]'s handling of the final RAA, which is returned here.
	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

	// Fail the monitor update triggered by the RAA. nodes[1] must generate no
	// messages or events while the update is pending, but still records the
	// (failed) monitor update attempt.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Restore monitor updating. No messages should be rebroadcast (the RAA had no
	// response), and no new monitor update is needed — only the pending HTLC
	// forward completes, surfacing PaymentReceived.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentReceived { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	// Finally claim the payment to leave both nodes in a clean state.
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
}
643
#[test]
fn test_monitor_update_raa_while_paused() {
	// Tests handling of an RAA while monitor updating has already been marked failed.
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	//
	// Both nodes send a payment to each other simultaneously; nodes[0] hits a
	// monitor failure while handling nodes[1]'s commitment_signed, and then also
	// receives nodes[1]'s RAA while still paused. Restoring the monitor must
	// replay both the RAA response and the commitment state in order.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	// Give nodes[1] some balance so it can route a payment back to nodes[0].
	send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
	let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
	{
		// Payment #1: nodes[0] -> nodes[1].
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, our_payment_hash_1, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

	let (payment_preimage_2, our_payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	{
		// Payment #2, in the opposite direction: nodes[1] -> nodes[0].
		let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
		let route = get_route(&nodes[1].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[1].node.send_payment(&route, our_payment_hash_2, &None).unwrap();
		check_added_monitors!(nodes[1], 1);
	}
	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

	// nodes[1] processes nodes[0]'s add + commitment normally and produces an RAA.
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	// nodes[0] hits a monitor failure on nodes[1]'s commitment_signed and must go
	// silent while the update is pending.
	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[0], 1);

	// The RAA arriving while paused must also be deferred, not answered.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
	check_added_monitors!(nodes[0], 1);

	// Restore monitor updating; nodes[0] should now emit its deferred RAA +
	// commitment_signed in one go.
	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	// Complete the remaining commitment/RAA exchanges in both directions.
	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
	check_added_monitors!(nodes[1], 1);
	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// With all states revoked, each side should receive its inbound payment.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[0], 1);
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);

	// Claim both payments to leave the channel balanced and clean.
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
}
722
fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
	// Tests handling of a monitor update failure when processing an incoming RAA
	//
	// Topology: nodes[0] <-> nodes[1] <-> nodes[2]. An HTLC fail-back from
	// nodes[2] causes nodes[1] to hit a monitor update failure on the resulting
	// RAA, freezing the nodes[1]<->nodes[2] channel while the nodes[0]<->nodes[1]
	// channel keeps working. If `test_ignore_second_cs` is set, an extra
	// commitment_signed is delivered to nodes[1] while it is paused, which it
	// must defer responding to until the monitor is restored.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
	let logger = Arc::new(test_utils::TestLogger::new());

	// Rebalance a bit so that we can send backwards from 2 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);

	// Route a first payment that we'll fail backwards
	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
	expect_pending_htlcs_forwardable!(nodes[2]);
	check_added_monitors!(nodes[2], 1);

	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);

	// Run the dance but hold on to the final RAA so we can deliver it under a
	// failed monitor below.
	let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
	check_added_monitors!(nodes[0], 0);

	// While the second channel is AwaitingRAA, forward a second payment to get it into the
	// holding cell.
	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now fail monitor updating.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Attempt to forward a third payment but fail due to the second channel being unavailable
	// for forwarding.
	let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_3, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
	send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
	check_added_monitors!(nodes[1], 0);

	// nodes[1] cannot forward over the frozen channel, so it fails payment #3
	// back to nodes[0] with a temporary-disable channel update.
	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	match events_2.remove(0) {
		MessageSendEvent::UpdateHTLCs { node_id, updates } => {
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert_eq!(updates.update_fail_htlcs.len(), 1);
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fee.is_none());

			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);

			let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
			assert_eq!(msg_events.len(), 1);
			match msg_events[0] {
				MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
					assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
					assert_eq!(msg.contents.flags & 2, 2); // temp disabled
				},
				_ => panic!("Unexpected event"),
			}

			let events = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
				assert_eq!(payment_hash, payment_hash_3);
				// Failed at nodes[1] (forwarding hop), not the destination.
				assert!(!rejected_by_dest);
			} else { panic!("Unexpected event!"); }
		},
		_ => panic!("Unexpected event type!"),
	};

	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
		let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
		let route = get_route(&nodes[2].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[2].node.send_payment(&route, payment_hash_4, &None).unwrap();
		check_added_monitors!(nodes[2], 1);

		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
		check_added_monitors!(nodes[1], 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
		(Some(payment_preimage_4), Some(payment_hash_4))
	} else { (None, None) };

	// Restore monitor updating, ensuring we immediately get a fail-back update and a
	// update_add update.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 1);

	let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
	if test_ignore_second_cs {
		// Extra SendRevokeAndACK destined for nodes[2] in this mode.
		assert_eq!(events_3.len(), 3);
	} else {
		assert_eq!(events_3.len(), 2);
	}

	// Note that the ordering of the events for different nodes is non-prescriptive, though the
	// ordering of the two events that both go to nodes[2] have to stay in the same order.
	let messages_a = match events_3.pop().unwrap() {
		MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert_eq!(updates.update_fail_htlcs.len(), 1);
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			(updates.update_fail_htlcs.remove(0), updates.commitment_signed)
		},
		_ => panic!("Unexpected event type!"),
	};
	let raa = if test_ignore_second_cs {
		match events_3.remove(1) {
			MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
				assert_eq!(node_id, nodes[2].node.get_our_node_id());
				Some(msg.clone())
			},
			_ => panic!("Unexpected event"),
		}
	} else { None };
	let send_event_b = SendEvent::from_event(events_3.remove(0));
	assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());

	// Now deliver the new messages...

	// Payment #1's fail-back reaches nodes[0]; rejected_by_dest as nodes[2]
	// (the final recipient) failed it.
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
	commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 1);
	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
		assert_eq!(payment_hash, payment_hash_1);
		assert!(rejected_by_dest);
	} else { panic!("Unexpected event!"); }

	// Payment #2 (from the holding cell) finally goes out to nodes[2].
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
	if test_ignore_second_cs {
		// Manually unwind the full commitment/RAA exchange, including the deferred
		// RAA for nodes[2]'s earlier (payment #4) commitment.
		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
		check_added_monitors!(nodes[2], 1);
		let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
		check_added_monitors!(nodes[2], 1);
		let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
		assert!(bs_cs.update_add_htlcs.is_empty());
		assert!(bs_cs.update_fail_htlcs.is_empty());
		assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
		assert!(bs_cs.update_fulfill_htlcs.is_empty());
		assert!(bs_cs.update_fee.is_none());

		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
		check_added_monitors!(nodes[1], 1);
		let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
		assert!(as_cs.update_add_htlcs.is_empty());
		assert!(as_cs.update_fail_htlcs.is_empty());
		assert!(as_cs.update_fail_malformed_htlcs.is_empty());
		assert!(as_cs.update_fulfill_htlcs.is_empty());
		assert!(as_cs.update_fee.is_none());

		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
		check_added_monitors!(nodes[1], 1);
		let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
		check_added_monitors!(nodes[2], 1);
		let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
		check_added_monitors!(nodes[2], 1);
		assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());

		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
		check_added_monitors!(nodes[1], 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	} else {
		commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
	}

	expect_pending_htlcs_forwardable!(nodes[2]);

	// Payment #2 arrives at its destination.
	let events_6 = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events_6.len(), 1);
	match events_6[0] {
		Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
		_ => panic!("Unexpected event"),
	};

	if test_ignore_second_cs {
		// Forward payment #4 from nodes[1] on to nodes[0] and claim it.
		expect_pending_htlcs_forwardable!(nodes[1]);
		check_added_monitors!(nodes[1], 1);

		send_event = SendEvent::from_node(&nodes[1]);
		assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
		assert_eq!(send_event.msgs.len(), 1);
		nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);

		expect_pending_htlcs_forwardable!(nodes[0]);

		let events_9 = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events_9.len(), 1);
		match events_9[0] {
			Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
			_ => panic!("Unexpected event"),
		};
		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
	}

	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
}
978
#[test]
fn test_monitor_update_fail_raa() {
	// Exercise the RAA monitor-failure scenario both without and with an extra
	// commitment_signed delivered while monitor updating is paused.
	for &ignore_second_cs in &[false, true] {
		do_test_monitor_update_fail_raa(ignore_second_cs);
	}
}
984
985 #[test]
986 fn test_monitor_update_fail_reestablish() {
987         // Simple test for message retransmission after monitor update failure on
988         // channel_reestablish generating a monitor update (which comes from freeing holding cell
989         // HTLCs).
990         let chanmon_cfgs = create_chanmon_cfgs(3);
991         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
992         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
993         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
994         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
995         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
996
997         let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
998
999         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1000         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1001
1002         assert!(nodes[2].node.claim_funds(our_payment_preimage, &None, 1_000_000));
1003         check_added_monitors!(nodes[2], 1);
1004         let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1005         assert!(updates.update_add_htlcs.is_empty());
1006         assert!(updates.update_fail_htlcs.is_empty());
1007         assert!(updates.update_fail_malformed_htlcs.is_empty());
1008         assert!(updates.update_fee.is_none());
1009         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1010         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1011         check_added_monitors!(nodes[1], 1);
1012         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1013         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1014
1015         *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1016         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1017         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1018
1019         let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1020         let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1021
1022         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1023
1024         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1025         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1026         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1027         check_added_monitors!(nodes[1], 1);
1028
1029         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1030         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1031
1032         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1033         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1034
1035         assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
1036         assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
1037
1038         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1039
1040         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1041         check_added_monitors!(nodes[1], 0);
1042         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1043
1044         *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1045         let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1046         nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1047         check_added_monitors!(nodes[1], 0);
1048
1049         updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1050         assert!(updates.update_add_htlcs.is_empty());
1051         assert!(updates.update_fail_htlcs.is_empty());
1052         assert!(updates.update_fail_malformed_htlcs.is_empty());
1053         assert!(updates.update_fee.is_none());
1054         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1055         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1056         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1057
1058         let events = nodes[0].node.get_and_clear_pending_events();
1059         assert_eq!(events.len(), 1);
1060         match events[0] {
1061                 Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
1062                 _ => panic!("Unexpected event"),
1063         }
1064 }
1065
#[test]
fn raa_no_response_awaiting_raa_state() {
	// This is a rather convoluted test which ensures that if handling of an RAA does not happen
	// due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
	// in question (assuming it intends to respond with a CS after monitor updating is restored).
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);

	// Queue up two payments - one will be delivered right away, one immediately goes into the
	// holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
	// immediately after a CS. By failing the monitor update triggered by the CS (which requires
	// only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
	// generation during RAA while in monitor-update-failed state.
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
		nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
		// The second send goes into the holding cell - no monitor update is generated for it yet.
		check_added_monitors!(nodes[0], 0);
	}

	// Deliver the first payment's update_add_htlc + commitment_signed to nodes[1].
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);

	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	// Handling the RAA frees the second payment from the holding cell, producing a new
	// update_add_htlc/commitment_signed pair.
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
	// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
	// then restore channel monitor updates.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	// The failed monitor update freezes the channel - nodes[1] generates no messages.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Un-fail the monitor and signal completion of the pending update, which should release the
	// queued RAA/CS response.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	// nodes[1] should be AwaitingRAA here!
	check_added_monitors!(nodes[1], 0);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_1, 1000000);

	// We send a third payment here, which is somewhat of a redundant test, but the
	// chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
	// commitment transaction states) whereas here we can explicitly check for it.
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_3, &None).unwrap();
		// nodes[0] is AwaitingRAA again, so the third payment sits in the holding cell with no
		// monitor update and no outbound messages.
		check_added_monitors!(nodes[0], 0);
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	// The RAA frees the third payment from the holding cell.
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	// Finally deliver the RAA to nodes[1] which results in a CS response to the last update
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);

	// Complete the final commitment dance for the third HTLC.
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_3, 1000000);

	// Clean up: claim all three payments back, verifying each HTLC made it through.
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
}
1187
#[test]
fn claim_while_disconnected_monitor_update_fail() {
	// Test for claiming a payment while disconnected and then having the resulting
	// channel-update-generated monitor update fail. This kind of thing isn't a particularly
	// contrived case for nodes with network instability.
	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
	// code introduced a regression in this test (specifically, this caught a removal of the
	// channel_reestablish handling ensuring the order was sensical given the messages used).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	// Forward a payment for B to claim
	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	// Claim while disconnected - the update_fulfill_htlc goes into the holding cell.
	assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });

	let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
	// update.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Send a second payment from A to B, resulting in a commitment update that gets swallowed with
	// the monitor still failed
	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
	// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
	// until we've channel_monitor_update'd and updated for the new commitment transaction.

	// Now un-fail the monitor, which will result in B sending its original commitment update,
	// receiving the commitment update from A, and the resulting commitment dances.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	// nodes[1] should now emit both its (previously frozen) fulfill+CS and the RAA for A's CS.
	let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_msgs.len(), 2);

	match bs_msgs[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			check_added_monitors!(nodes[0], 1);

			let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
			check_added_monitors!(nodes[1], 1);
		},
		_ => panic!("Unexpected event"),
	}

	match bs_msgs[1] {
		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}

	let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	// Finish the commitment dance in both directions (B's fulfill, A's new HTLC).
	let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, 1000000);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);

	// The original (pre-disconnect) payment should now be fully claimed on A's side.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentSent { ref payment_preimage } => {
			assert_eq!(*payment_preimage, payment_preimage_1);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
}
1313
#[test]
fn monitor_failed_no_reestablish_response() {
	// Test for receiving a channel_reestablish after a monitor update failure resulted in no
	// response to a commitment_signed.
	// Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
	// debug_assert!() failure in channel_reestablish handling.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
	// on receipt).
	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	// The failed monitor update means nodes[1] generates no RAA/CS response here.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
	// is still failing to update monitors.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });

	let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);

	// Un-fail the monitor; nodes[1] should now produce the RAA/CS it owed for the original CS.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
}
1381
#[test]
fn first_message_on_recv_ordering() {
	// Test that if the initial generator of a monitor-update-frozen state doesn't generate
	// messages, we're willing to flip the order of response messages if necessary in response to
	// a commitment_signed which needs to send an RAA first.
	// At a high level, our goal is to fail monitor updating in response to an RAA which needs no
	// response and then handle a CS while in the failed state, requiring an RAA followed by a CS
	// response. To do this, we start routing two payments, with the final RAA for the first being
	// delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
	// have no pending response but will want to send a RAA/CS (with the updates for the second
	// payment applied).
	// Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	// Route the first payment outbound, holding the last RAA for B until we are set up so that we
	// can deliver it and fail the monitor update.
	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	// Hold A's final RAA for the first payment - we deliver it later, after failing the monitor.
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Route the second payment, generating an update_add_htlc/commitment_signed
	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);

	// Deliver the final RAA for the first payment, which does not require a response. RAAs
	// generally require a commitment_signed, so the fact that we're expecting an opposite response
	// to the next message also tests resetting the delivery order.
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[1], 1);

	// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
	// RAA/CS response, which should be generated when we call channel_monitor_update (with the
	// appropriate HTLC acceptance).
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);

	// Un-fail the monitor, releasing the queued RAA/CS response for the second payment.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_1, 1000000);

	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
}
1483
#[test]
fn test_monitor_update_fail_claim() {
	// Basic test for monitor update failures when processing claim_funds calls.
	// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
	// update to claim the payment. We then send a payment C->B->A, making the forward of this
	// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
	// updating and claim the payment on B.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
	let logger = Arc::new(test_utils::TestLogger::new());

	// Rebalance a bit so that we can send backwards from 3 to 2.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);

	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// Make B's monitor update fail when it claims: the claim is treated as success (see the log
	// assertion below), but the 0<->1 channel is left paused awaiting channel_monitor_updated().
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
	check_added_monitors!(nodes[1], 1);

	// Send the C->B->A payment which B will have to fail due to the paused 0<->1 channel.
	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
		let route = get_route(&nodes[2].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[2].node.send_payment(&route, payment_hash_2, &None).unwrap();
		check_added_monitors!(nodes[2], 1);
	}

	// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
	// paused, so forward shouldn't succeed until we call channel_monitor_updated().
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	// B generates no messages yet: the earlier claim attempt logged a temporary failure and the
	// forward across the paused channel cannot proceed.
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);

	// B fails the C->A HTLC back to C since the 0<->1 channel is unavailable.
	let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
	nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);

	// C should see a channel_update marking the 0<->1 channel as temporarily disabled...
	let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events[0] {
		MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
			assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
			assert_eq!(msg.contents.flags & 2, 2); // temp disabled
		},
		_ => panic!("Unexpected event"),
	}

	// ...and a PaymentFailed event which was not the destination's fault.
	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
		assert_eq!(payment_hash, payment_hash_2);
		assert!(!rejected_by_dest);
	} else { panic!("Unexpected event!"); }

	// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	// The original A->B claim now completes end-to-end.
	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
		assert_eq!(payment_preimage, payment_preimage_1);
	} else { panic!("Unexpected event!"); }
}
1565
#[test]
fn test_monitor_update_on_pending_forwards() {
	// Basic test for monitor update failures when processing pending HTLC fail/add forwards.
	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
	// from C to A will be pending a forward to A.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
	let logger = Arc::new(test_utils::TestLogger::new());

	// Rebalance a bit so that we can send backwards from 3 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);

	// C fails the A->C payment back, handing B a fail it will need to relay towards A.
	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
	expect_pending_htlcs_forwardable!(nodes[2]);
	check_added_monitors!(nodes[2], 1);

	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Send C->A, giving B a pending add forward towards A as well.
	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	{
		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
		let route = get_route(&nodes[2].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
		nodes[2].node.send_payment(&route, payment_hash_2, &None).unwrap();
		check_added_monitors!(nodes[2], 1);
	}

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);

	// Fail B's monitor update while it processes the pending forwards (the back-fail of payment 1
	// and the add of payment 2 go into one commitment update on the 0<->1 channel), so no messages
	// are generated until the monitor is restored.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);

	// Restore the monitor: the queued fail+add update towards A is released.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);

	// A sees payment 1's failure (rejected by the destination, C) and the pending forward for
	// payment 2, which it then receives.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
		assert_eq!(payment_hash, payment_hash_1);
		assert!(rejected_by_dest);
	} else { panic!("Unexpected event!"); }
	match events[1] {
		Event::PendingHTLCsForwardable { .. } => { },
		_ => panic!("Unexpected event"),
	};
	nodes[0].node.process_pending_htlc_forwards();
	expect_payment_received!(nodes[0], payment_hash_2, 1000000);

	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
}
1638
#[test]
fn monitor_update_claim_fail_no_response() {
	// Test for claim_funds resulting in both a monitor update failure and no message response (due
	// to channel being AwaitingRAA).
	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
	// code was broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
	let logger = Arc::new(test_utils::TestLogger::new());

	// Forward a payment for B to claim
	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
	let route = {
		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
		get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap()
	};
	nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	let send_event = SendEvent::from_event(msg_events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	// Run the dance but hold back A's final RAA, leaving B in AwaitingRAA.
	let raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

	// Claim with a failing monitor update; B logs the temporary failure and stays silent.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);

	// Restoring the monitor still produces no messages - B is AwaitingRAA.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Delivering the withheld RAA unblocks B, which receives the second payment.
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash_2, 1000000);

	// B's fulfill for the first payment now flows back to A.
	let fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &fulfill_updates.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], fulfill_updates.commitment_signed, false);

	let pending_events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(pending_events.len(), 1);
	if let Event::PaymentSent { ref payment_preimage } = pending_events[0] {
		assert_eq!(*payment_preimage, payment_preimage_1);
	} else { panic!("Unexpected event"); }

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
}
1703
// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
// restore_b_before_conf has no meaning if !confirm_a_first
fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool) {
	// Test that if the monitor update generated by funding_transaction_generated fails we continue
	// the channel setup happily after the update is restored.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Open-channel handshake: open_channel / accept_channel, then A builds the funding tx.
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
	check_added_monitors!(nodes[0], 0);

	// B's monitor update fails when it handles funding_created; its funding_signed is still
	// produced (fetched below) but B's side is paused until channel_monitor_updated.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);

	// A's monitor update also fails when it handles funding_signed, so the FundingBroadcastSafe
	// event is withheld until A's monitor is restored.
	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
			assert_eq!(user_channel_id, 43);
			assert_eq!(*funding_txo, funding_output);
		},
		_ => panic!("Unexpected event"),
	};

	// Confirm the funding tx on one side; B's monitor is still failed, so even if B sees the
	// confirmation it cannot respond yet.
	if confirm_a_first {
		confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
		nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
	} else {
		assert!(!restore_b_before_conf);
		confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	}

	// Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	if !restore_b_before_conf {
		// Confirm on B while its monitor is still failed: still no messages or events from B.
		confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	}

	// Restore B's monitor; channel setup then completes normally in either ordering.
	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
		nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));

		confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
		(channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
	} else {
		if restore_b_before_conf {
			confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
		}
		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
		(channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
	};
	// Propagate the announcement/updates to both nodes' routers so send_payment can route.
	for node in nodes.iter() {
		assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
		node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
		node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
	}

	// Sanity-check the channel is fully usable, then close it cooperatively.
	send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
	close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
}
1799
#[test]
fn during_funding_monitor_fail() {
	// Exercise each valid combination of (confirm_a_first, restore_b_before_conf);
	// (false, true) is skipped as restore_b_before_conf has no meaning if !confirm_a_first.
	for &(confirm_a_first, restore_b_before_conf) in &[(true, true), (true, false), (false, false)] {
		do_during_funding_monitor_fail(confirm_a_first, restore_b_before_conf);
	}
}
1806
#[test]
fn test_path_paused_mpp() {
	// Simple test of sending a multi-part payment where one path is currently blocked awaiting
	// monitor update
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Diamond topology: 0 -> {1, 2} -> 3.
	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
	let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
	let logger = Arc::new(test_utils::TestLogger::new());

	let (payment_preimage, payment_hash) = get_payment_preimage_hash!(&nodes[0]);
	let payment_secret = PaymentSecret([0xdb; 32]);
	let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler, &nodes[3].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, logger.clone()).unwrap();

	// Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_id;
	route.paths[0][1].short_channel_id = chan_3_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
	route.paths[1][1].short_channel_id = chan_4_id;

	// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
	// (for the path 0 -> 2 -> 3) fails.
	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
	*nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));

	// Now check that we get the right return value, indicating that the first path succeeded but
	// the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
	// some paths succeeded, preventing retry.
	if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) {
		assert_eq!(results.len(), 2);
		if let Ok(()) = results[0] {} else { panic!(); }
		if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
	} else { panic!(); }
	check_added_monitors!(nodes[0], 2);
	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());

	// Pass the first HTLC of the payment along to nodes[3].
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false);

	// And check that, after we successfully update the monitor for chan_2 we can pass the second
	// HTLC along to nodes[3] and claim the whole payment back to nodes[0].
	let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true);

	claim_payment_along_route_with_secret(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage, Some(payment_secret), 200_000);
}