1 //! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
3 //! There are a bunch of these as their handling is relatively error-prone so they are split out
4 //! here. See also the chanmon_fail_consistency fuzz test.
6 use chain::transaction::OutPoint;
7 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
8 use ln::channelmonitor::ChannelMonitorUpdateErr;
9 use ln::features::InitFeatures;
11 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
12 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
13 use util::errors::APIError;
15 use bitcoin_hashes::sha256::Hash as Sha256;
16 use bitcoin_hashes::Hash;
18 use ln::functional_test_utils::*;
// A PermanentFailure returned from the monitor-update call while sending an outbound
// payment should surface as APIError::ChannelUnavailable and result in the channel
// being closed (a BroadcastChannelUpdate plus a HandleError sent to the peer, and an
// empty channel list afterwards).
21 fn test_simple_monitor_permanent_update_fail() {
22 // Test that we handle a simple permanent monitor update failure
23 let chanmon_cfgs = create_chanmon_cfgs(2);
24 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
25 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
26 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
27 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
29 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
30 let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
// Force the next monitor update to report a permanent failure, then attempt the send.
32 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
33 if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1, &None) {} else { panic!(); }
// Two monitor updates are expected here (the failed send plus the force-close path).
34 check_added_monitors!(nodes[0], 2);
36 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
37 assert_eq!(events_1.len(), 2);
// First event: the channel update broadcast announcing the channel is gone.
39 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
40 _ => panic!("Unexpected event"),
// Second event: an error message destined for the counterparty.
43 MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
44 _ => panic!("Unexpected event"),
47 // TODO: Once we hit the chain with the failure transaction we should check that we get a
48 // PaymentFailed event
// The channel must have been removed entirely after the permanent failure.
50 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Exercises recovery from a TemporaryFailure monitor-update error on an outbound send:
// the send returns MonitorUpdateFailed and the channel freezes (no events, no messages)
// until channel_monitor_updated() is called, at which point the held update_add_htlc is
// released and the payment completes. With `disconnect` set, a peer disconnect/reconnect
// happens while the channel is frozen. Finally the same failure is induced a second time
// and the frozen channel is force-closed.
53 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
54 // Test that we can recover from a simple temporary monitor update failure optionally with
55 // a disconnect in between
56 let chanmon_cfgs = create_chanmon_cfgs(2);
57 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
58 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
59 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
60 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
62 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
63 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
// Arrange for the next monitor update to fail transiently; the send must report it.
65 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
66 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1, &None) {} else { panic!(); }
67 check_added_monitors!(nodes[0], 1);
// While frozen, nothing is generated but the channel still exists.
69 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
70 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
71 assert_eq!(nodes[0].node.list_channels().len(), 1);
// Optionally bounce the connection while the monitor update is still pending.
74 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
75 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
76 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
// Clear the failure and tell the manager the latest monitor update has now persisted.
79 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
80 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
81 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
82 check_added_monitors!(nodes[0], 0);
// The previously-held HTLC add is now released and can be delivered normally.
84 let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
85 assert_eq!(events_2.len(), 1);
86 let payment_event = SendEvent::from_event(events_2.pop().unwrap());
87 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
88 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
89 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
91 expect_pending_htlcs_forwardable!(nodes[1]);
93 let events_3 = nodes[1].node.get_and_clear_pending_events();
94 assert_eq!(events_3.len(), 1);
96 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
97 assert_eq!(payment_hash_1, *payment_hash);
98 assert_eq!(*payment_secret, None);
99 assert_eq!(amt, 1000000);
101 _ => panic!("Unexpected event"),
104 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
106 // Now set it to failed again...
107 let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
108 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
109 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2, &None) {} else { panic!(); }
110 check_added_monitors!(nodes[0], 1);
112 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
113 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
114 assert_eq!(nodes[0].node.list_channels().len(), 1);
117 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
118 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
119 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
122 // ...and make sure we can force-close a frozen channel
123 nodes[0].node.force_close_channel(&channel_id);
124 check_added_monitors!(nodes[0], 1);
125 check_closed_broadcast!(nodes[0], false);
127 // TODO: Once we hit the chain with the failure transaction we should check that we get a
128 // PaymentFailed event
130 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Runs the simple temporary-failure scenario both without and with a peer
// disconnect while the channel is frozen.
134 fn test_simple_monitor_temporary_update_fail() {
135 do_test_simple_monitor_temporary_update_fail(false);
136 do_test_simple_monitor_temporary_update_fail(true);
// Parameterized temporary-monitor-failure scenario with an in-flight claim racing an
// outbound send. `disconnect_count` is a bitfield: the low bits (masked by
// !disconnect_flags) count how many disconnect/reconnect cycles to perform, bit 8 swaps
// the RAA/commitment_signed processing order in the final exchange, and bit 16 defers
// delivery of the initial update_fulfill_htlc/CS to the channel_reestablish path.
139 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
// Bits 8 and 16 select behavior variants; everything above/below them is the
// disconnect count, hence the mask used throughout.
140 let disconnect_flags = 8 | 16;
142 // Test that we can recover from a temporary monitor update failure with some in-flight
143 // HTLCs going on at the same time potentially with some disconnection thrown in.
144 // * First we route a payment, then get a temporary monitor update failure when trying to
145 // route a second payment. We then claim the first payment.
146 // * If disconnect_count is set, we will disconnect at this point (which is likely as
147 // TemporaryFailure likely indicates net disconnect which resulted in failing to update
148 // the ChannelMonitor on a watchtower).
149 // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
150 // immediately, otherwise we wait disconnect and deliver them via the reconnect
151 // channel_reestablish processing (ie disconnect_count & 16 makes no sense if
152 // disconnect_count & !disconnect_flags is 0).
153 // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
154 // through message sending, potentially disconnect/reconnecting multiple times based on
155 // disconnect_count, to get the update_fulfill_htlc through.
156 // * We then walk through more message exchanges to get the original update_add_htlc
157 // through, swapping message ordering based on disconnect_count & 8 and optionally
158 // disconnect/reconnecting based on disconnect_count.
159 let chanmon_cfgs = create_chanmon_cfgs(2);
160 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
161 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
162 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
163 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
165 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
167 // Now try to send a second payment which will fail to send
168 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
169 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
171 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
172 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2, &None) {} else { panic!(); }
173 check_added_monitors!(nodes[0], 1);
175 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
176 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
177 assert_eq!(nodes[0].node.list_channels().len(), 1);
179 // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
180 // but nodes[0] won't respond since it is frozen.
181 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
182 check_added_monitors!(nodes[1], 1);
183 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
184 assert_eq!(events_2.len(), 1);
185 let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
186 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
187 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
188 assert!(update_add_htlcs.is_empty());
189 assert_eq!(update_fulfill_htlcs.len(), 1);
190 assert!(update_fail_htlcs.is_empty());
191 assert!(update_fail_malformed_htlcs.is_empty());
192 assert!(update_fee.is_none());
// Without bit 16, deliver the fulfill/CS to the frozen nodes[0] right away; the CS
// handling is expected to be held back (no RAA generated while the monitor is failed).
194 if (disconnect_count & 16) == 0 {
195 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
196 let events_3 = nodes[0].node.get_and_clear_pending_events();
197 assert_eq!(events_3.len(), 1);
199 Event::PaymentSent { ref payment_preimage } => {
200 assert_eq!(*payment_preimage, payment_preimage_1);
202 _ => panic!("Unexpected event"),
205 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
206 check_added_monitors!(nodes[0], 1);
207 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
208 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
211 (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
213 _ => panic!("Unexpected event"),
// First optional disconnect while the monitor update is still pending.
216 if disconnect_count & !disconnect_flags > 0 {
217 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
218 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
221 // Now fix monitor updating...
222 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
223 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
224 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
225 check_added_monitors!(nodes[0], 0);
// Helper: disconnect both peers, reconnect, exchange channel_reestablish, and return
// the reestablish messages plus each side's parsed responses.
227 macro_rules! disconnect_reconnect_peers { () => { {
228 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
229 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
231 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
232 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
233 assert_eq!(reestablish_1.len(), 1);
234 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
235 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
236 assert_eq!(reestablish_2.len(), 1);
238 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
239 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
240 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
241 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
243 assert!(as_resp.0.is_none());
244 assert!(bs_resp.0.is_none());
246 (reestablish_1, reestablish_2, as_resp, bs_resp)
// Recover nodes[0]'s held update_add_htlc/CS and its RAA — either via the
// channel_reestablish responses (if we disconnected) or from pending msg events.
249 let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
250 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
251 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
253 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
254 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
255 assert_eq!(reestablish_1.len(), 1);
256 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
257 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
258 assert_eq!(reestablish_2.len(), 1);
260 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
261 check_added_monitors!(nodes[0], 0);
262 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
263 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
264 check_added_monitors!(nodes[1], 0);
265 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
267 assert!(as_resp.0.is_none());
268 assert!(bs_resp.0.is_none());
270 assert!(bs_resp.1.is_none());
// With bit 16, nodes[1] re-sends its fulfill/CS during reestablish and we process it here.
271 if (disconnect_count & 16) == 0 {
272 assert!(bs_resp.2.is_none());
274 assert!(as_resp.1.is_some());
275 assert!(as_resp.2.is_some());
276 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
278 assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
279 assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
280 assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
281 assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
282 assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
283 assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
285 assert!(as_resp.1.is_none());
287 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
288 let events_3 = nodes[0].node.get_and_clear_pending_events();
289 assert_eq!(events_3.len(), 1);
291 Event::PaymentSent { ref payment_preimage } => {
292 assert_eq!(*payment_preimage, payment_preimage_1);
294 _ => panic!("Unexpected event"),
297 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
298 let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
299 // No commitment_signed so get_event_msg's assert(len == 1) passes
300 check_added_monitors!(nodes[0], 1);
302 as_resp.1 = Some(as_resp_raa);
// A second disconnect/reconnect must reproduce the exact same reestablish state.
306 if disconnect_count & !disconnect_flags > 1 {
307 let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
309 if (disconnect_count & 16) == 0 {
310 assert!(reestablish_1 == second_reestablish_1);
311 assert!(reestablish_2 == second_reestablish_2);
313 assert!(as_resp == second_as_resp);
314 assert!(bs_resp == second_bs_resp);
317 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
// No disconnect happened: the released messages come straight off the event queue.
319 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
320 assert_eq!(events_4.len(), 2);
321 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
322 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
323 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
326 _ => panic!("Unexpected event"),
330 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
// Deliver the second payment's update_add/CS to nodes[1]; it responds with an RAA only
// (it is still owed an RAA itself).
332 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
333 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
334 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
335 // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
336 check_added_monitors!(nodes[1], 1);
338 if disconnect_count & !disconnect_flags > 2 {
339 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
341 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
342 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
344 assert!(as_resp.2.is_none());
345 assert!(bs_resp.2.is_none());
348 let as_commitment_update;
349 let bs_second_commitment_update;
// Deliver nodes[1]'s RAA to nodes[0], which then produces its next (empty-update) CS.
351 macro_rules! handle_bs_raa { () => {
352 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
353 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
354 assert!(as_commitment_update.update_add_htlcs.is_empty());
355 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
356 assert!(as_commitment_update.update_fail_htlcs.is_empty());
357 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
358 assert!(as_commitment_update.update_fee.is_none());
359 check_added_monitors!(nodes[0], 1);
// Deliver nodes[0]'s held RAA to nodes[1], which then produces its next (empty-update) CS.
362 macro_rules! handle_initial_raa { () => {
363 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
364 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
365 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
366 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
367 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
368 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
369 assert!(bs_second_commitment_update.update_fee.is_none());
370 check_added_monitors!(nodes[1], 1);
// Bit 8 picks which RAA is processed first; each ordering is interleaved with
// optional disconnects whose reestablish responses are checked for consistency.
373 if (disconnect_count & 8) == 0 {
376 if disconnect_count & !disconnect_flags > 3 {
377 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
379 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
380 assert!(bs_resp.1.is_none());
382 assert!(as_resp.2.unwrap() == as_commitment_update);
383 assert!(bs_resp.2.is_none());
385 assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
388 handle_initial_raa!();
390 if disconnect_count & !disconnect_flags > 4 {
391 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
393 assert!(as_resp.1.is_none());
394 assert!(bs_resp.1.is_none());
396 assert!(as_resp.2.unwrap() == as_commitment_update);
397 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
400 handle_initial_raa!();
402 if disconnect_count & !disconnect_flags > 3 {
403 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
405 assert!(as_resp.1.is_none());
406 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
408 assert!(as_resp.2.is_none());
409 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
411 assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
416 if disconnect_count & !disconnect_flags > 4 {
417 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
419 assert!(as_resp.1.is_none());
420 assert!(bs_resp.1.is_none());
422 assert!(as_resp.2.unwrap() == as_commitment_update);
423 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
// Final CS/RAA exchange in both directions to bring the channel fully up to date.
427 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
428 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
429 // No commitment_signed so get_event_msg's assert(len == 1) passes
430 check_added_monitors!(nodes[0], 1);
432 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
433 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
434 // No commitment_signed so get_event_msg's assert(len == 1) passes
435 check_added_monitors!(nodes[1], 1);
437 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
438 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
439 check_added_monitors!(nodes[1], 1);
441 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
442 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
443 check_added_monitors!(nodes[0], 1);
// The second payment finally completes end-to-end.
445 expect_pending_htlcs_forwardable!(nodes[1]);
447 let events_5 = nodes[1].node.get_and_clear_pending_events();
448 assert_eq!(events_5.len(), 1);
450 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
451 assert_eq!(payment_hash_2, *payment_hash);
452 assert_eq!(*payment_secret, None);
453 assert_eq!(amt, 1000000);
455 _ => panic!("Unexpected event"),
458 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
// Variants 0-5: no ordering swap (bit 8 clear) and immediate fulfill delivery
// (bit 16 clear); the value is just the number of disconnect/reconnect cycles.
462 fn test_monitor_temporary_update_fail_a() {
463 do_test_monitor_temporary_update_fail(0);
464 do_test_monitor_temporary_update_fail(1);
465 do_test_monitor_temporary_update_fail(2);
466 do_test_monitor_temporary_update_fail(3);
467 do_test_monitor_temporary_update_fail(4);
468 do_test_monitor_temporary_update_fail(5);
// Variants with bit 8 set: process the initial RAA before nodes[1]'s RAA in the
// final exchange, across 2-5 disconnect cycles.
472 fn test_monitor_temporary_update_fail_b() {
473 do_test_monitor_temporary_update_fail(2 | 8);
474 do_test_monitor_temporary_update_fail(3 | 8);
475 do_test_monitor_temporary_update_fail(4 | 8);
476 do_test_monitor_temporary_update_fail(5 | 8);
// Variants with bit 16 set: the update_fulfill_htlc/CS is delivered via the
// channel_reestablish path rather than immediately (requires at least one
// disconnect), with and without the bit-8 ordering swap.
480 fn test_monitor_temporary_update_fail_c() {
481 do_test_monitor_temporary_update_fail(1 | 16);
482 do_test_monitor_temporary_update_fail(2 | 16);
483 do_test_monitor_temporary_update_fail(3 | 16);
484 do_test_monitor_temporary_update_fail(2 | 8 | 16);
485 do_test_monitor_temporary_update_fail(3 | 8 | 16);
// A TemporaryFailure while handling an inbound commitment_signed must hold back the
// RAA/CS responses until channel_monitor_updated() is called — on both sides of the
// channel in turn — after which the payment completes normally.
489 fn test_monitor_update_fail_cs() {
490 // Tests handling of a monitor update failure when processing an incoming commitment_signed
491 let chanmon_cfgs = create_chanmon_cfgs(2);
492 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
493 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
494 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
495 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
497 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
498 let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
499 nodes[0].node.send_payment(route, our_payment_hash, &None).unwrap();
500 check_added_monitors!(nodes[0], 1);
502 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
503 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
// Fail nodes[1]'s monitor update during its CS handling: no responses are generated.
505 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
506 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
507 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
508 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
509 check_added_monitors!(nodes[1], 1);
510 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Unblock nodes[1]; it now emits its held RAA followed by its own commitment update.
512 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
513 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
514 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
515 check_added_monitors!(nodes[1], 0);
516 let responses = nodes[1].node.get_and_clear_pending_msg_events();
517 assert_eq!(responses.len(), 2);
520 MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
521 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
522 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
523 check_added_monitors!(nodes[0], 1);
525 _ => panic!("Unexpected event"),
528 MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
529 assert!(updates.update_add_htlcs.is_empty());
530 assert!(updates.update_fulfill_htlcs.is_empty());
531 assert!(updates.update_fail_htlcs.is_empty());
532 assert!(updates.update_fail_malformed_htlcs.is_empty());
533 assert!(updates.update_fee.is_none());
534 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
// Now fail nodes[0]'s monitor update while it handles nodes[1]'s CS — same freeze.
536 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
537 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
538 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
539 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
540 check_added_monitors!(nodes[0], 1);
541 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
543 _ => panic!("Unexpected event"),
// Unblock nodes[0] and deliver its final RAA, completing the commitment dance.
546 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
547 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
548 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
549 check_added_monitors!(nodes[0], 0);
551 let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
552 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
553 check_added_monitors!(nodes[1], 1);
555 expect_pending_htlcs_forwardable!(nodes[1]);
557 let events = nodes[1].node.get_and_clear_pending_events();
558 assert_eq!(events.len(), 1);
560 Event::PaymentReceived { payment_hash, payment_secret, amt } => {
561 assert_eq!(payment_hash, our_payment_hash);
562 assert_eq!(payment_secret, None);
563 assert_eq!(amt, 1000000);
565 _ => panic!("Unexpected event"),
568 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
// A TemporaryFailure while handling an inbound RAA (with nothing further to send)
// must not cause any message rebroadcast once channel_monitor_updated() is called —
// only the deferred HTLC forwarding/receipt processing happens.
572 fn test_monitor_update_fail_no_rebroadcast() {
573 // Tests handling of a monitor update failure when no message rebroadcasting on
574 // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
576 let chanmon_cfgs = create_chanmon_cfgs(2);
577 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
578 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
579 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
580 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
582 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
583 let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
584 nodes[0].node.send_payment(route, our_payment_hash, &None).unwrap();
585 check_added_monitors!(nodes[0], 1);
587 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
588 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
// Run the dance but keep nodes[1]'s RAA un-delivered so we can feed it in manually.
589 let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
// Fail the monitor update during RAA handling: nothing may be emitted, not even events.
591 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
592 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
593 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
594 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
595 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
596 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
597 check_added_monitors!(nodes[1], 1);
// After recovery there is still no message to rebroadcast — only the pending
// HTLC forward fires, producing the PaymentReceived event.
599 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
600 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
601 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
602 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
603 check_added_monitors!(nodes[1], 0);
604 expect_pending_htlcs_forwardable!(nodes[1]);
606 let events = nodes[1].node.get_and_clear_pending_events();
607 assert_eq!(events.len(), 1);
609 Event::PaymentReceived { payment_hash, .. } => {
610 assert_eq!(payment_hash, our_payment_hash);
612 _ => panic!("Unexpected event"),
615 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
// With payments crossing in both directions, nodes[0]'s monitor updating is failed
// while handling an inbound add/CS, and then an RAA arrives while updating is still
// paused. Both held responses must be released together by channel_monitor_updated(),
// and both payments must then complete.
619 fn test_monitor_update_raa_while_paused() {
620 // Tests handling of an RAA while monitor updating has already been marked failed.
621 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
622 let chanmon_cfgs = create_chanmon_cfgs(2);
623 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
624 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
625 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
626 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
628 send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
// Queue one payment in each direction so both sides have an update_add in flight.
630 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
631 let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
632 nodes[0].node.send_payment(route, our_payment_hash_1, &None).unwrap();
633 check_added_monitors!(nodes[0], 1);
634 let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
636 let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
637 let (payment_preimage_2, our_payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
638 nodes[1].node.send_payment(route, our_payment_hash_2, &None).unwrap();
639 check_added_monitors!(nodes[1], 1);
640 let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
642 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
643 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
644 check_added_monitors!(nodes[1], 1);
645 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
// Pause nodes[0]'s monitor updating while it processes nodes[1]'s add/CS…
647 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
648 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
649 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
650 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
651 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
652 check_added_monitors!(nodes[0], 1);
// …then deliver the RAA while still paused: the responses must also be held back.
654 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
655 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
656 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
657 check_added_monitors!(nodes[0], 1);
// Unpause: nodes[0] releases both its RAA and its commitment update at once.
659 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
660 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
661 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
662 check_added_monitors!(nodes[0], 0);
664 let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
665 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
666 check_added_monitors!(nodes[1], 1);
667 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
669 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
670 check_added_monitors!(nodes[1], 1);
671 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
673 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
674 check_added_monitors!(nodes[0], 1);
675 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
// Both in-flight payments now land on their respective recipients.
677 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
678 check_added_monitors!(nodes[0], 1);
679 expect_pending_htlcs_forwardable!(nodes[0]);
680 expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);
682 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
683 check_added_monitors!(nodes[1], 1);
684 expect_pending_htlcs_forwardable!(nodes[1]);
685 expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
687 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
688 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
// Exercises nodes[1] hitting a TemporaryFailure monitor update while processing an inbound RAA
// on its channel with nodes[2], then verifies all held messages (fail-backs, forwards, and --
// when `test_ignore_second_cs` -- a held RAA for an extra inbound HTLC) are released correctly
// once `channel_monitor_updated` restores the channel.
691 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
692 // Tests handling of a monitor update failure when processing an incoming RAA
693 let chanmon_cfgs = create_chanmon_cfgs(3);
694 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
695 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
696 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
// Topology: nodes[0] <-> nodes[1] <-> nodes[2]; the monitor failure is injected on the
// nodes[1] <-> nodes[2] channel (chan_2).
697 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
698 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
700 // Rebalance a bit so that we can send backwards from 2 to 1.
701 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
703 // Route a first payment that we'll fail backwards
704 let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
706 // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
707 assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
708 expect_pending_htlcs_forwardable!(nodes[2]);
709 check_added_monitors!(nodes[2], 1);
711 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
712 assert!(updates.update_add_htlcs.is_empty());
713 assert!(updates.update_fulfill_htlcs.is_empty());
714 assert_eq!(updates.update_fail_htlcs.len(), 1);
715 assert!(updates.update_fail_malformed_htlcs.is_empty());
716 assert!(updates.update_fee.is_none());
717 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
// NOTE(review): the extra boolean flags to this dance appear to make it return nodes[2]'s
// final RAA instead of delivering it -- confirm against the macro definition; the RAA is
// delivered manually below after the monitor failure is armed.
719 let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
720 check_added_monitors!(nodes[0], 0);
722 // While the second channel is AwaitingRAA, forward a second payment to get it into the
724 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
725 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
726 nodes[0].node.send_payment(route, payment_hash_2, &None).unwrap();
727 check_added_monitors!(nodes[0], 1);
729 let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
730 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
731 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
// The forward of payment 2 sits in nodes[1]'s holding cell (chan_2 is AwaitingRAA), so no
// monitor update and no outbound messages yet.
733 expect_pending_htlcs_forwardable!(nodes[1]);
734 check_added_monitors!(nodes[1], 0);
735 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
737 // Now fail monitor updating.
738 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
739 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
740 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
741 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
742 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
743 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
744 check_added_monitors!(nodes[1], 1);
746 // Attempt to forward a third payment but fail due to the second channel being unavailable
749 let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
750 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
751 nodes[0].node.send_payment(route, payment_hash_3, &None).unwrap();
752 check_added_monitors!(nodes[0], 1);
754 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
755 send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
756 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
757 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
758 check_added_monitors!(nodes[1], 0);
// Payment 3 is failed back to nodes[0] immediately because chan_2 is still monitor-update-failed.
760 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
761 assert_eq!(events_2.len(), 1);
762 match events_2.remove(0) {
763 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
764 assert_eq!(node_id, nodes[0].node.get_our_node_id());
765 assert!(updates.update_fulfill_htlcs.is_empty());
766 assert_eq!(updates.update_fail_htlcs.len(), 1);
767 assert!(updates.update_fail_malformed_htlcs.is_empty());
768 assert!(updates.update_add_htlcs.is_empty());
769 assert!(updates.update_fee.is_none());
771 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
772 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
// The failure carries a channel_update for chan_2 so nodes[0]'s router learns it is
// temporarily disabled.
774 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
775 assert_eq!(msg_events.len(), 1);
776 match msg_events[0] {
777 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
778 assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
779 assert_eq!(msg.contents.flags & 2, 2); // temp disabled
781 _ => panic!("Unexpected event"),
784 let events = nodes[0].node.get_and_clear_pending_events();
785 assert_eq!(events.len(), 1);
786 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
787 assert_eq!(payment_hash, payment_hash_3);
// Not rejected by the destination -- it never got past nodes[1].
788 assert!(!rejected_by_dest);
789 } else { panic!("Unexpected event!"); }
791 _ => panic!("Unexpected event type!"),
// Optionally deliver an extra inbound HTLC + commitment_signed from nodes[2] while the
// monitor is still failed, so nodes[1] must also hold back the RAA it owes nodes[2].
794 let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
795 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
796 let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
797 let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
798 nodes[2].node.send_payment(route, payment_hash_4, &None).unwrap();
799 check_added_monitors!(nodes[2], 1);
801 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
802 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
803 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
804 check_added_monitors!(nodes[1], 1);
805 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
806 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
807 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
808 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
809 (Some(payment_preimage_4), Some(payment_hash_4))
810 } else { (None, None) };
812 // Restore monitor updating, ensuring we immediately get a fail-back update and a
813 // update_add update.
814 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
815 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
816 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
817 check_added_monitors!(nodes[1], 0);
818 expect_pending_htlcs_forwardable!(nodes[1]);
819 check_added_monitors!(nodes[1], 1);
// When the extra inbound HTLC was delivered, nodes[1] also owes nodes[2] a held RAA,
// hence three events instead of two.
821 let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
822 if test_ignore_second_cs {
823 assert_eq!(events_3.len(), 3);
825 assert_eq!(events_3.len(), 2);
828 // Note that the ordering of the events for different nodes is non-prescriptive, though the
829 // ordering of the two events that both go to nodes[2] have to stay in the same order.
830 let messages_a = match events_3.pop().unwrap() {
831 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
832 assert_eq!(node_id, nodes[0].node.get_our_node_id());
833 assert!(updates.update_fulfill_htlcs.is_empty());
834 assert_eq!(updates.update_fail_htlcs.len(), 1);
835 assert!(updates.update_fail_malformed_htlcs.is_empty());
836 assert!(updates.update_add_htlcs.is_empty());
837 assert!(updates.update_fee.is_none());
838 (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
840 _ => panic!("Unexpected event type!"),
842 let raa = if test_ignore_second_cs {
843 match events_3.remove(1) {
844 MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
845 assert_eq!(node_id, nodes[2].node.get_our_node_id());
848 _ => panic!("Unexpected event"),
// The forward of payment 2 toward nodes[2], released from the holding cell.
851 let send_event_b = SendEvent::from_event(events_3.remove(0));
852 assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
854 // Now deliver the new messages...
// Payment 1's fail-back reaches nodes[0]; rejected_by_dest since nodes[2] failed it.
856 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
857 commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
858 let events_4 = nodes[0].node.get_and_clear_pending_events();
859 assert_eq!(events_4.len(), 1);
860 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
861 assert_eq!(payment_hash, payment_hash_1);
862 assert!(rejected_by_dest);
863 } else { panic!("Unexpected event!"); }
865 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
866 if test_ignore_second_cs {
// Full manual commitment dance between nodes[1] and nodes[2], interleaving the held RAA.
867 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
868 check_added_monitors!(nodes[2], 1);
869 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
870 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
871 check_added_monitors!(nodes[2], 1);
872 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
873 assert!(bs_cs.update_add_htlcs.is_empty());
874 assert!(bs_cs.update_fail_htlcs.is_empty());
875 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
876 assert!(bs_cs.update_fulfill_htlcs.is_empty());
877 assert!(bs_cs.update_fee.is_none());
879 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
880 check_added_monitors!(nodes[1], 1);
881 let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
882 assert!(as_cs.update_add_htlcs.is_empty());
883 assert!(as_cs.update_fail_htlcs.is_empty());
884 assert!(as_cs.update_fail_malformed_htlcs.is_empty());
885 assert!(as_cs.update_fulfill_htlcs.is_empty());
886 assert!(as_cs.update_fee.is_none());
888 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
889 check_added_monitors!(nodes[1], 1);
890 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
892 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
893 check_added_monitors!(nodes[2], 1);
894 let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
896 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
897 check_added_monitors!(nodes[2], 1);
898 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
900 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
901 check_added_monitors!(nodes[1], 1);
902 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Simple case: no held RAA, so the macro dance completes the exchange.
904 commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
// Payment 2 finally arrives at nodes[2].
907 expect_pending_htlcs_forwardable!(nodes[2]);
909 let events_6 = nodes[2].node.get_and_clear_pending_events();
910 assert_eq!(events_6.len(), 1);
912 Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
913 _ => panic!("Unexpected event"),
916 if test_ignore_second_cs {
// Complete payment 4's forward (nodes[2] -> nodes[1] -> nodes[0]) and claim it.
917 expect_pending_htlcs_forwardable!(nodes[1]);
918 check_added_monitors!(nodes[1], 1);
920 send_event = SendEvent::from_node(&nodes[1]);
921 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
922 assert_eq!(send_event.msgs.len(), 1);
923 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
924 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
926 expect_pending_htlcs_forwardable!(nodes[0]);
928 let events_9 = nodes[0].node.get_and_clear_pending_events();
929 assert_eq!(events_9.len(), 1);
931 Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
932 _ => panic!("Unexpected event"),
934 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
937 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
// Driver: runs the RAA monitor-update-failure scenario both without and with the extra
// second commitment_signed delivery (see `do_test_monitor_update_fail_raa`).
941 fn test_monitor_update_fail_raa() {
942 do_test_monitor_update_fail_raa(false);
943 do_test_monitor_update_fail_raa(true);
// Verifies that when a monitor update fails while handling channel_reestablish (because the
// reestablish frees a holding-cell fulfill), a later disconnect/reconnect retransmits the same
// reestablish and, once the monitor is restored, the fulfill is delivered to nodes[0].
947 fn test_monitor_update_fail_reestablish() {
948 // Simple test for message retransmission after monitor update failure on
949 // channel_reestablish generating a monitor update (which comes from freeing holding cell
951 let chanmon_cfgs = create_chanmon_cfgs(3);
952 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
953 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
954 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
955 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
956 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
958 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
// Disconnect nodes[0] <-> nodes[1] so the fulfill nodes[1] receives from nodes[2] below has
// to sit in the holding cell until reconnection.
960 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
961 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
963 assert!(nodes[2].node.claim_funds(our_payment_preimage, &None, 1_000_000));
964 check_added_monitors!(nodes[2], 1);
965 let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
966 assert!(updates.update_add_htlcs.is_empty());
967 assert!(updates.update_fail_htlcs.is_empty());
968 assert!(updates.update_fail_malformed_htlcs.is_empty());
969 assert!(updates.update_fee.is_none());
970 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
971 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
972 check_added_monitors!(nodes[1], 1);
// No messages toward nodes[0]: the peer is disconnected, so the fulfill is held.
973 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
974 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
// First reconnection attempt: the reestablish frees the holding cell, but the resulting
// monitor update fails.
976 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
977 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
978 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
980 let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
981 let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
983 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
985 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
986 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
987 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
988 check_added_monitors!(nodes[1], 1);
// Second disconnect/reconnect: both sides must retransmit byte-identical reestablish
// messages, and re-handling them must not attempt another monitor update.
990 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
991 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
993 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
994 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
996 assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
997 assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
999 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1001 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1002 check_added_monitors!(nodes[1], 0);
1003 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Restore the monitor; the held fulfill for chan_1 is now released toward nodes[0].
1005 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1006 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1007 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1008 check_added_monitors!(nodes[1], 0);
1010 updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1011 assert!(updates.update_add_htlcs.is_empty());
1012 assert!(updates.update_fail_htlcs.is_empty());
1013 assert!(updates.update_fail_malformed_htlcs.is_empty());
1014 assert!(updates.update_fee.is_none());
1015 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1016 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1017 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
// nodes[0] sees the payment complete end-to-end.
1019 let events = nodes[0].node.get_and_clear_pending_events();
1020 assert_eq!(events.len(), 1);
1022 Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
1023 _ => panic!("Unexpected event"),
1028 fn raa_no_response_awaiting_raa_state() {
1029 // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1030 // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1031 // in question (assuming it intends to respond with a CS after monitor updating is restored).
1032 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1033 let chanmon_cfgs = create_chanmon_cfgs(2);
1034 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1035 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1036 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1037 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1039 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1040 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1041 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1042 let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
1044 // Queue up two payments - one will be delivered right away, one immediately goes into the
1045 // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1046 // immediately after a CS. By setting failing the monitor update failure from the CS (which
1047 // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1048 // generation during RAA while in monitor-update-failed state.
1049 nodes[0].node.send_payment(route.clone(), payment_hash_1, &None).unwrap();
1050 check_added_monitors!(nodes[0], 1);
// Second send adds no monitor update: the HTLC goes straight into the holding cell.
1051 nodes[0].node.send_payment(route.clone(), payment_hash_2, &None).unwrap();
1052 check_added_monitors!(nodes[0], 0);
1054 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1055 assert_eq!(events.len(), 1);
1056 let payment_event = SendEvent::from_event(events.pop().unwrap());
1057 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1058 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1059 check_added_monitors!(nodes[1], 1);
// nodes[1]'s RAA frees payment 2 from nodes[0]'s holding cell, producing the next update_add.
1061 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1062 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1063 check_added_monitors!(nodes[0], 1);
1064 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1065 assert_eq!(events.len(), 1);
1066 let payment_event = SendEvent::from_event(events.pop().unwrap());
1068 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1069 check_added_monitors!(nodes[0], 1);
1070 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1072 // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1073 // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1074 // then restore channel monitor updates.
1075 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1076 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1077 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1078 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1079 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1080 check_added_monitors!(nodes[1], 1);
// RAA delivered while monitor-update-failed: the response must be deferred, not dropped.
1082 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1083 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1084 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
1085 check_added_monitors!(nodes[1], 1);
// Restore the monitor; nodes[1] must now emit both the held RAA and a CS (the core of the
// AwaitingRemoteRevoke check this test exists for).
1087 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1088 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1089 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1090 // nodes[1] should be AwaitingRAA here!
1091 check_added_monitors!(nodes[1], 0);
1092 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1093 expect_pending_htlcs_forwardable!(nodes[1]);
1094 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1096 // We send a third payment here, which is somewhat of a redundant test, but the
1097 // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1098 // commitment transaction states) whereas here we can explicitly check for it.
1099 nodes[0].node.send_payment(route.clone(), payment_hash_3, &None).unwrap();
1100 check_added_monitors!(nodes[0], 0);
1101 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// nodes[1]'s RAA frees payment 3 from the holding cell.
1103 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1104 check_added_monitors!(nodes[0], 1);
1105 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1106 assert_eq!(events.len(), 1);
1107 let payment_event = SendEvent::from_event(events.pop().unwrap());
1109 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1110 check_added_monitors!(nodes[0], 1);
1111 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1113 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1114 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1115 check_added_monitors!(nodes[1], 1);
1116 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1118 // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1119 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1120 check_added_monitors!(nodes[1], 1);
1121 expect_pending_htlcs_forwardable!(nodes[1]);
1122 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1123 let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1125 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1126 check_added_monitors!(nodes[0], 1);
1128 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1129 check_added_monitors!(nodes[0], 1);
1130 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1132 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1133 check_added_monitors!(nodes[1], 1);
1134 expect_pending_htlcs_forwardable!(nodes[1]);
1135 expect_payment_received!(nodes[1], payment_hash_3, 1000000);
// All three payments arrived; claim them to leave the channel in a clean state.
1137 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1138 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1139 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
1143 fn claim_while_disconnected_monitor_update_fail() {
1144 // Test for claiming a payment while disconnected and then having the resulting
1145 // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1146 // contrived case for nodes with network instability.
1147 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1148 // code introduced a regression in this test (specifically, this caught a removal of the
1149 // channel_reestablish handling ensuring the order was sensical given the messages used).
1150 let chanmon_cfgs = create_chanmon_cfgs(2);
1151 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1152 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1153 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1154 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1156 // Forward a payment for B to claim
1157 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
1159 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1160 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
// Claim while disconnected: the fulfill sits in nodes[1]'s holding cell.
1162 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1163 check_added_monitors!(nodes[1], 1);
1165 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1166 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1168 let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1169 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1171 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1172 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1174 // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
1176 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1178 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1179 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1180 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1181 check_added_monitors!(nodes[1], 1);
1182 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1184 // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1185 // the monitor still failed
1186 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1187 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1188 nodes[0].node.send_payment(route, payment_hash_2, &None).unwrap();
1189 check_added_monitors!(nodes[0], 1);
1191 let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1192 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1193 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1194 check_added_monitors!(nodes[1], 1);
1195 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1196 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
1197 // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
1198 // until we've channel_monitor_update'd and updated for the new commitment transaction.
1200 // Now un-fail the monitor, which will result in B sending its original commitment update,
1201 // receiving the commitment update from A, and the resulting commitment dances.
1202 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1203 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1204 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1205 check_added_monitors!(nodes[1], 0);
// nodes[1] releases two messages: the fulfill update (with its CS) and the held RAA for
// nodes[0]'s new HTLC.
1207 let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1208 assert_eq!(bs_msgs.len(), 2);
1211 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1212 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1213 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1214 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1215 check_added_monitors!(nodes[0], 1);
1217 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1218 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1219 check_added_monitors!(nodes[1], 1);
1221 _ => panic!("Unexpected event"),
1225 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1226 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1227 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1228 check_added_monitors!(nodes[0], 1);
1230 _ => panic!("Unexpected event"),
// Finish the crossed commitment dances for both pending updates.
1233 let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1235 let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1236 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1237 check_added_monitors!(nodes[0], 1);
1238 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1240 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1241 check_added_monitors!(nodes[1], 1);
1242 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1243 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1244 check_added_monitors!(nodes[1], 1);
1246 expect_pending_htlcs_forwardable!(nodes[1]);
1247 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1249 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1250 check_added_monitors!(nodes[0], 1);
// The original (claimed-while-disconnected) payment completes at nodes[0].
1252 let events = nodes[0].node.get_and_clear_pending_events();
1253 assert_eq!(events.len(), 1);
1255 Event::PaymentSent { ref payment_preimage } => {
1256 assert_eq!(*payment_preimage, payment_preimage_1);
1258 _ => panic!("Unexpected event"),
1261 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1265 fn monitor_failed_no_reestablish_response() {
1266 // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1267 // response to a commitment_signed.
1268 // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1269 // debug_assert!() failure in channel_reestablish handling.
1270 let chanmon_cfgs = create_chanmon_cfgs(2);
1271 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1272 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1273 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1274 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1276 // Route the payment and deliver the initial commitment_signed (with a monitor update failure
// on nodes[1], so B generates no response messages).
1278 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1279 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1280 nodes[0].node.send_payment(route, payment_hash_1, &None).unwrap();
1281 check_added_monitors!(nodes[0], 1);
// Arrange for nodes[1]'s next ChannelMonitor update to fail so that handling the incoming
// commitment_signed freezes the channel with no outbound RAA/CS generated.
1283 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1284 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1285 assert_eq!(events.len(), 1);
1286 let payment_event = SendEvent::from_event(events.pop().unwrap());
1287 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1288 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
// nodes[1] must stay silent while its monitor update is pending.
1289 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1290 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1291 check_added_monitors!(nodes[1], 1);
1293 // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1294 // is still failing to update monitors.
1295 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1296 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1298 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1299 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1301 let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1302 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1304 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1305 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
// Un-fail the monitor and notify the ChannelManager; nodes[1] should now release its
// held-back revoke_and_ack + commitment_signed pair.
1307 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1308 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1309 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1310 check_added_monitors!(nodes[1], 0);
1311 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1313 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1314 check_added_monitors!(nodes[0], 1);
1315 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1316 check_added_monitors!(nodes[0], 1);
1318 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1319 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1320 check_added_monitors!(nodes[1], 1);
// With the commitment dance complete nodes[1] finally accepts the HTLC; claim it to leave
// both nodes with clean state.
1322 expect_pending_htlcs_forwardable!(nodes[1]);
1323 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1325 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1329 fn first_message_on_recv_ordering() {
1330 // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1331 // messages, we're willing to flip the order of response messages if necessary in response to
1332 // a commitment_signed which needs to send an RAA first.
1333 // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1334 // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1335 // response. To do this, we start routing two payments, with the final RAA for the first being
1336 // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1337 // have no pending response but will want to send a RAA/CS (with the updates for the second
1338 // payment applied).
1339 // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1340 let chanmon_cfgs = create_chanmon_cfgs(2);
1341 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1342 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1343 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1344 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1346 // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1347 // can deliver it and fail the monitor update.
1348 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1349 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1350 nodes[0].node.send_payment(route, payment_hash_1, &None).unwrap();
1351 check_added_monitors!(nodes[0], 1);
1353 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1354 assert_eq!(events.len(), 1);
1355 let payment_event = SendEvent::from_event(events.pop().unwrap());
1356 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1357 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1358 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1359 check_added_monitors!(nodes[1], 1);
1360 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1362 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1363 check_added_monitors!(nodes[0], 1);
1364 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1365 check_added_monitors!(nodes[0], 1);
// Hold A's final RAA for payment 1 here; it is delivered later, after B's monitor is failed.
1367 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1369 // Route the second payment, generating an update_add_htlc/commitment_signed
1370 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1371 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1372 nodes[0].node.send_payment(route, payment_hash_2, &None).unwrap();
1373 check_added_monitors!(nodes[0], 1);
1374 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1375 assert_eq!(events.len(), 1);
1376 let payment_event = SendEvent::from_event(events.pop().unwrap());
1377 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
// Fail nodes[1]'s next monitor update before delivering the held RAA.
1379 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1381 // Deliver the final RAA for the first payment, which does not require a response. RAAs
1382 // generally require a commitment_signed, so the fact that we're expecting an opposite response
1383 // to the next message also tests resetting the delivery order.
1384 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1385 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1386 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1387 check_added_monitors!(nodes[1], 1);
1389 // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1390 // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1391 // appropriate HTLC acceptance).
1392 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1393 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1394 check_added_monitors!(nodes[1], 1);
1395 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1396 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
// Restore monitor updating; nodes[1] should emit the RAA-first response pair.
1398 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1399 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1400 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1401 check_added_monitors!(nodes[1], 0);
1403 expect_pending_htlcs_forwardable!(nodes[1]);
1404 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1406 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1407 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1408 check_added_monitors!(nodes[0], 1);
1409 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1410 check_added_monitors!(nodes[0], 1);
1412 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1413 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1414 check_added_monitors!(nodes[1], 1);
1416 expect_pending_htlcs_forwardable!(nodes[1]);
1417 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
// Claim both payments to leave the channel in a clean state.
1419 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1420 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1424 fn test_monitor_update_fail_claim() {
1425 // Basic test for monitor update failures when processing claim_funds calls.
1426 // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1427 // update to claim the payment. We then send a payment C->B->A, making the forward of this
1428 // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
1429 // updating and claim the payment on B.
1430 let chanmon_cfgs = create_chanmon_cfgs(3);
1431 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1432 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1433 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1434 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
1435 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
1437 // Rebalance a bit so that we can send backwards from 3 to 2.
1438 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
1440 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
// Fail nodes[1]'s next monitor update so that claiming the A->B payment pauses the 0<->1
// channel.
1442 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1443 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1444 check_added_monitors!(nodes[1], 1);
// Send a payment C->B->A which must be forwarded over the now-paused 0<->1 channel.
1446 let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1447 let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1448 nodes[2].node.send_payment(route, payment_hash_2, &None).unwrap();
1449 check_added_monitors!(nodes[2], 1);
1451 // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1452 // paused, so forward shouldn't succeed until we call channel_monitor_updated().
1453 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1455 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1456 assert_eq!(events.len(), 1);
1457 let payment_event = SendEvent::from_event(events.pop().unwrap());
1458 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1459 let events = nodes[1].node.get_and_clear_pending_msg_events();
1460 assert_eq!(events.len(), 0);
1461 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1462 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
// nodes[1] fails the forward back to nodes[2] because the outbound channel is unavailable.
1464 let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1465 nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
1466 commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
1468 let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
1469 assert_eq!(msg_events.len(), 1);
1470 match msg_events[0] {
1471 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
1472 assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
1473 assert_eq!(msg.contents.flags & 2, 2); // temp disabled
1475 _ => panic!("Unexpected event"),
1478 let events = nodes[2].node.get_and_clear_pending_events();
1479 assert_eq!(events.len(), 1);
1480 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1481 assert_eq!(payment_hash, payment_hash_2);
// The failure originated at the forwarding hop, not the destination.
1482 assert!(!rejected_by_dest);
1483 } else { panic!("Unexpected event!"); }
1485 // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1486 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1487 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1488 check_added_monitors!(nodes[1], 0);
1490 let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1491 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1492 commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1494 let events = nodes[0].node.get_and_clear_pending_events();
1495 assert_eq!(events.len(), 1);
1496 if let Event::PaymentSent { payment_preimage, .. } = events[0] {
1497 assert_eq!(payment_preimage, payment_preimage_1);
1498 } else { panic!("Unexpected event!"); }
1502 fn test_monitor_update_on_pending_forwards() {
1503 // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1504 // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1505 // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1506 // from C to A will be pending a forward to A.
1507 let chanmon_cfgs = create_chanmon_cfgs(3);
1508 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1509 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1510 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1511 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
1512 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
1514 // Rebalance a bit so that we can send backwards from 3 to 1.
1515 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
// First payment A->C, which C immediately fails back, leaving a pending back-fail at B.
1517 let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1518 assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
1519 expect_pending_htlcs_forwardable!(nodes[2]);
1520 check_added_monitors!(nodes[2], 1);
1522 let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1523 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1524 commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1525 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Second payment C->A, which is left pending a forward at B.
1527 let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1528 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1529 nodes[2].node.send_payment(route, payment_hash_2, &None).unwrap();
1530 check_added_monitors!(nodes[2], 1);
1532 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1533 assert_eq!(events.len(), 1);
1534 let payment_event = SendEvent::from_event(events.pop().unwrap());
1535 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1536 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
// Fail nodes[1]'s monitor update while it processes both the pending fail and the pending
// forward towards nodes[0].
1538 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1539 expect_pending_htlcs_forwardable!(nodes[1]);
1540 check_added_monitors!(nodes[1], 1);
1541 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1542 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
// Restore the monitor; nodes[1] should now emit a single update containing both the fail
// (for payment 1) and the add (for payment 2) towards nodes[0].
1544 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1545 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1546 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1547 check_added_monitors!(nodes[1], 0);
1549 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1550 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1551 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1552 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1554 let events = nodes[0].node.get_and_clear_pending_events();
1555 assert_eq!(events.len(), 2);
1556 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1557 assert_eq!(payment_hash, payment_hash_1);
// Payment 1 was failed by the destination (nodes[2]).
1558 assert!(rejected_by_dest);
1559 } else { panic!("Unexpected event!"); }
1561 Event::PendingHTLCsForwardable { .. } => { },
1562 _ => panic!("Unexpected event"),
1564 nodes[0].node.process_pending_htlc_forwards();
1565 expect_payment_received!(nodes[0], payment_hash_2, 1000000);
1567 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
1571 fn monitor_update_claim_fail_no_response() {
1572 // Test for claim_funds resulting in both a monitor update failure and no message response (due
1573 // to channel being AwaitingRAA).
1574 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1576 let chanmon_cfgs = create_chanmon_cfgs(2);
1577 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1578 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1579 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1580 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1582 // Forward a payment for B to claim
1583 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
1585 // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1586 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1587 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1588 nodes[0].node.send_payment(route, payment_hash_2, &None).unwrap();
1589 check_added_monitors!(nodes[0], 1);
1591 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1592 assert_eq!(events.len(), 1);
1593 let payment_event = SendEvent::from_event(events.pop().unwrap());
1594 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
// The extra dance flags hold back the final RAA so nodes[1] stays in AwaitingRAA.
1595 let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
// Claiming while the monitor update fails should produce no messages at all (the channel is
// also AwaitingRAA, so no response would be generated either way).
1597 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1598 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1599 check_added_monitors!(nodes[1], 1);
1600 let events = nodes[1].node.get_and_clear_pending_msg_events();
1601 assert_eq!(events.len(), 0);
1602 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
// Restoring the monitor still yields no messages, as nodes[1] remains AwaitingRAA.
1604 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1605 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1606 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1607 check_added_monitors!(nodes[1], 0);
1608 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Delivering the held RAA unblocks the second payment and the fulfill for the first.
1610 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1611 check_added_monitors!(nodes[1], 1);
1612 expect_pending_htlcs_forwardable!(nodes[1]);
1613 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1615 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1616 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1617 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1619 let events = nodes[0].node.get_and_clear_pending_events();
1620 assert_eq!(events.len(), 1);
1622 Event::PaymentSent { ref payment_preimage } => {
1623 assert_eq!(*payment_preimage, payment_preimage_1);
1625 _ => panic!("Unexpected event"),
1628 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1631 // Note that restore_between_fails with !fail_on_generate is useless
1632 // Also note that !fail_on_generate && !fail_on_signed is useless
1633 // Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
1634 // confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
1635 // restore_b_before_conf has no meaning if !confirm_a_first
1636 fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
1637 // Test that if the monitor update generated by funding_transaction_generated fails we continue
1638 // the channel setup happily after the update is restored.
1639 let chanmon_cfgs = create_chanmon_cfgs(2);
1640 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1641 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1642 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Perform the open_channel/accept_channel handshake manually so we can interpose monitor
// failures during the funding flow.
1644 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1645 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::supported(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1646 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::supported(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1648 let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
// Optionally fail nodes[0]'s very first monitor update (generated by
// funding_transaction_generated).
1650 if fail_on_generate {
1651 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1653 nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
1654 check_added_monitors!(nodes[0], 1);
// nodes[1]'s monitor update on funding_created always fails in this test.
1656 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1657 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1658 let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1659 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1660 check_added_monitors!(nodes[1], 1);
// Optionally restore nodes[0]'s monitor between the generate-failure and the
// funding_signed-failure; this should produce no events or messages on its own.
1662 if restore_between_fails {
1663 assert!(fail_on_generate);
1664 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1665 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1666 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1667 check_added_monitors!(nodes[0], 0);
1668 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1669 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// NOTE(review): an `else`/branch header between orig lines 1669 and 1673 is not visible in
// this listing; the flag assertions below document which combinations are reachable here.
1673 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1675 assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
1676 assert!(fail_on_generate); // Somebody has to fail
1678 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1679 if fail_on_signed || !restore_between_fails {
1680 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1681 if fail_on_generate && !restore_between_fails {
1682 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented funding_signed from allowing funding broadcast".to_string(), 1);
1683 check_added_monitors!(nodes[0], 1);
1685 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1686 check_added_monitors!(nodes[0], 1);
// Restore nodes[0]'s monitor so the FundingBroadcastSafe event below can be generated.
1688 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1689 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1690 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1691 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1692 check_added_monitors!(nodes[0], 0);
1694 check_added_monitors!(nodes[0], 1);
1697 let events = nodes[0].node.get_and_clear_pending_events();
1698 assert_eq!(events.len(), 1);
1700 Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
1701 assert_eq!(user_channel_id, 43);
1702 assert_eq!(*funding_txo, funding_output);
1704 _ => panic!("Unexpected event"),
// Confirm the funding transaction on one or both nodes; nodes[1] is still monitor-failed,
// so it must not send funding_locked yet.
1707 if confirm_a_first {
1708 confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
1709 nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
1711 assert!(!restore_b_before_conf);
1712 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1713 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1716 // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
1717 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1718 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1719 reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1720 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1721 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1723 if !restore_b_before_conf {
1724 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1725 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1726 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
// Finally restore nodes[1]'s monitor and complete the funding_locked exchange in whichever
// order the flags dictate, then verify the channel is fully usable.
1729 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1730 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1731 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1732 check_added_monitors!(nodes[1], 0);
1734 let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1735 nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
1737 confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
1738 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1739 (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
1741 if restore_b_before_conf {
1742 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1744 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1745 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
1747 for node in nodes.iter() {
1748 assert!(node.router.handle_channel_announcement(&announcement).unwrap());
1749 node.router.handle_channel_update(&as_update).unwrap();
1750 node.router.handle_channel_update(&bs_update).unwrap();
// Sanity-check the channel works end-to-end, then close it cooperatively.
1753 send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
1754 close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1758 fn during_funding_monitor_fail() {
// Exercise the meaningful flag combinations of do_during_funding_monitor_fail. Argument
// order: (fail_on_generate, restore_between_fails, fail_on_signed, confirm_a_first,
// restore_b_before_conf) — see the notes above that function for which combinations are
// valid.
1759 do_during_funding_monitor_fail(false, false, true, true, true);
1760 do_during_funding_monitor_fail(true, false, true, false, false);
1761 do_during_funding_monitor_fail(true, true, true, true, false);
1762 do_during_funding_monitor_fail(true, true, false, false, false);