1 //! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from monitor updates.
3 //! There are a bunch of these as their handling is relatively error-prone so they are split out
4 //! here. See also the chanmon_fail_consistency fuzz test.
6 use chain::transaction::OutPoint;
7 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSendFailure};
8 use ln::channelmonitor::ChannelMonitorUpdateErr;
9 use ln::features::InitFeatures;
11 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
12 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
13 use util::errors::APIError;
15 use bitcoin_hashes::sha256::Hash as Sha256;
16 use bitcoin_hashes::Hash;
18 use ln::functional_test_utils::*;
// Verifies that a PermanentFailure monitor-update error during send_payment
// force-closes the channel: the send fails with ChannelUnavailable, a
// BroadcastChannelUpdate and a HandleError message are generated, and the
// channel disappears from list_channels().
21 fn test_simple_monitor_permanent_update_fail() {
22 // Test that we handle a simple permanent monitor update failure
23 let chanmon_cfgs = create_chanmon_cfgs(2);
24 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
25 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
26 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
27 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
29 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
30 let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
// Force the next monitor update on nodes[0] to report a permanent failure.
32 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
33 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {});
34 check_added_monitors!(nodes[0], 2);
36 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
37 assert_eq!(events_1.len(), 2);
// One event announces the channel closure via a channel_update broadcast...
39 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
40 _ => panic!("Unexpected event"),
// ...and the other carries an error message destined for the counterparty.
43 MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
44 _ => panic!("Unexpected event"),
47 // TODO: Once we hit the chain with the failure transaction we should check that we get a
48 // PaymentFailed event
// The channel must have been force-closed and removed from the channel list.
50 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Exercises recovery from a TemporaryFailure monitor-update error on
// send_payment. The channel is frozen (no events/messages generated, channel
// still listed), optionally the peers disconnect/reconnect while frozen, then
// channel_monitor_updated() releases the held update_add_htlc so the payment
// completes. A second failed send then checks that a frozen channel can still
// be force-closed.
53 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
54 // Test that we can recover from a simple temporary monitor update failure optionally with
55 // a disconnect in between
56 let chanmon_cfgs = create_chanmon_cfgs(2);
57 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
58 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
59 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
60 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
62 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
63 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
// Make the next monitor update fail temporarily; the send reports
// MonitorUpdateFailed and the channel is frozen until the update is redone.
65 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
67 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), false, APIError::MonitorUpdateFailed, {});
68 check_added_monitors!(nodes[0], 1);
// While frozen: nothing is surfaced, but the channel is not closed.
70 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
71 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
72 assert_eq!(nodes[0].node.list_channels().len(), 1);
75 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
76 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
77 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
// Un-fail the monitor and tell the ChannelManager the latest update
// persisted; this releases the pending update_add_htlc.
80 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
81 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
82 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
83 check_added_monitors!(nodes[0], 0);
85 let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
86 assert_eq!(events_2.len(), 1);
87 let payment_event = SendEvent::from_event(events_2.pop().unwrap());
88 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
89 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
90 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
92 expect_pending_htlcs_forwardable!(nodes[1]);
94 let events_3 = nodes[1].node.get_and_clear_pending_events();
95 assert_eq!(events_3.len(), 1);
97 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
98 assert_eq!(payment_hash_1, *payment_hash);
99 assert_eq!(*payment_secret, None);
100 assert_eq!(amt, 1000000);
102 _ => panic!("Unexpected event"),
105 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
107 // Now set it to failed again...
108 let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
109 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
110 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
111 check_added_monitors!(nodes[0], 1);
113 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
114 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
115 assert_eq!(nodes[0].node.list_channels().len(), 1);
118 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
119 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
120 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
123 // ...and make sure we can force-close a frozen channel
124 nodes[0].node.force_close_channel(&channel_id);
125 check_added_monitors!(nodes[0], 1);
126 check_closed_broadcast!(nodes[0], false);
128 // TODO: Once we hit the chain with the failure transaction we should check that we get a
129 // PaymentFailed event
131 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Runs the temporary-failure recovery scenario both without and with a peer
// disconnect while the channel is frozen.
135 fn test_simple_monitor_temporary_update_fail() {
136 do_test_simple_monitor_temporary_update_fail(false);
137 do_test_simple_monitor_temporary_update_fail(true);
// Parameterized walk through TemporaryFailure recovery with an in-flight
// claim and an in-flight send happening simultaneously. disconnect_count is a
// bitfield: the low bits (masked by !disconnect_flags) count how many
// disconnect/reconnect cycles to interleave, bit 8 swaps the RAA delivery
// order in the final exchange, and bit 16 defers delivery of B's initial
// fulfill/CS to the reconnect path. See the detailed comment inside.
140 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
// Bits 8 and 16 are behavior flags, not part of the disconnect counter.
141 let disconnect_flags = 8 | 16;
143 // Test that we can recover from a temporary monitor update failure with some in-flight
144 // HTLCs going on at the same time potentially with some disconnection thrown in.
145 // * First we route a payment, then get a temporary monitor update failure when trying to
146 // route a second payment. We then claim the first payment.
147 // * If disconnect_count is set, we will disconnect at this point (which is likely as
148 // TemporaryFailure likely indicates net disconnect which resulted in failing to update
149 // the ChannelMonitor on a watchtower).
150 // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
151 // immediately, otherwise we wait disconnect and deliver them via the reconnect
152 // channel_reestablish processing (ie disconnect_count & 16 makes no sense if
153 // disconnect_count & !disconnect_flags is 0).
154 // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
155 // through message sending, potentially disconnect/reconnecting multiple times based on
156 // disconnect_count, to get the update_fulfill_htlc through.
157 // * We then walk through more message exchanges to get the original update_add_htlc
158 // through, swapping message ordering based on disconnect_count & 8 and optionally
159 // disconnect/reconnecting based on disconnect_count.
160 let chanmon_cfgs = create_chanmon_cfgs(2);
161 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
162 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
163 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
164 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
166 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
168 // Now try to send a second payment which will fail to send
169 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
170 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
172 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
173 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
174 check_added_monitors!(nodes[0], 1);
// Frozen channel: no events or messages surface, channel stays listed.
176 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
177 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
178 assert_eq!(nodes[0].node.list_channels().len(), 1);
180 // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
181 // but nodes[0] won't respond since it is frozen.
182 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
183 check_added_monitors!(nodes[1], 1);
184 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
185 assert_eq!(events_2.len(), 1);
186 let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
187 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
188 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
189 assert!(update_add_htlcs.is_empty());
190 assert_eq!(update_fulfill_htlcs.len(), 1);
191 assert!(update_fail_htlcs.is_empty());
192 assert!(update_fail_malformed_htlcs.is_empty());
193 assert!(update_fee.is_none());
// Without bit 16, deliver B's fulfill/CS now; the frozen nodes[0] accepts
// the fulfill (generating PaymentSent) but cannot generate an RAA.
195 if (disconnect_count & 16) == 0 {
196 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
197 let events_3 = nodes[0].node.get_and_clear_pending_events();
198 assert_eq!(events_3.len(), 1);
200 Event::PaymentSent { ref payment_preimage } => {
201 assert_eq!(*payment_preimage, payment_preimage_1);
203 _ => panic!("Unexpected event"),
206 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
207 check_added_monitors!(nodes[0], 1);
208 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
209 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
212 (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
214 _ => panic!("Unexpected event"),
// Any disconnect bits set in the counter portion trigger the first disconnect.
217 if disconnect_count & !disconnect_flags > 0 {
218 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
219 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
222 // Now fix monitor updating...
223 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
224 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
225 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
226 check_added_monitors!(nodes[0], 0);
// Helper: full disconnect/reconnect cycle returning both reestablish
// messages and both sides' reestablish-response bundles.
228 macro_rules! disconnect_reconnect_peers { () => { {
229 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
230 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
232 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
233 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
234 assert_eq!(reestablish_1.len(), 1);
235 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
236 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
237 assert_eq!(reestablish_2.len(), 1);
239 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
240 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
241 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
242 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
244 assert!(as_resp.0.is_none());
245 assert!(bs_resp.0.is_none());
247 (reestablish_1, reestablish_2, as_resp, bs_resp)
// If we disconnected above, recover A's add/CS (and B's fulfill/CS under
// bit 16) out of the channel_reestablish responses instead of fresh events.
250 let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
251 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
252 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
254 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
255 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
256 assert_eq!(reestablish_1.len(), 1);
257 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
258 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
259 assert_eq!(reestablish_2.len(), 1);
261 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
262 check_added_monitors!(nodes[0], 0);
263 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
264 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
265 check_added_monitors!(nodes[1], 0);
266 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
268 assert!(as_resp.0.is_none());
269 assert!(bs_resp.0.is_none());
271 assert!(bs_resp.1.is_none());
272 if (disconnect_count & 16) == 0 {
273 assert!(bs_resp.2.is_none());
275 assert!(as_resp.1.is_some());
276 assert!(as_resp.2.is_some());
277 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
// Bit 16 path: B retransmits its original fulfill/CS via reestablish;
// verify it matches what we captured earlier, then process it now.
279 assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
280 assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
281 assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
282 assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
283 assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
284 assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
286 assert!(as_resp.1.is_none());
288 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
289 let events_3 = nodes[0].node.get_and_clear_pending_events();
290 assert_eq!(events_3.len(), 1);
292 Event::PaymentSent { ref payment_preimage } => {
293 assert_eq!(*payment_preimage, payment_preimage_1);
295 _ => panic!("Unexpected event"),
298 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
299 let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
300 // No commitment_signed so get_event_msg's assert(len == 1) passes
301 check_added_monitors!(nodes[0], 1);
303 as_resp.1 = Some(as_resp_raa);
// A second disconnect cycle must be idempotent: identical retransmissions.
307 if disconnect_count & !disconnect_flags > 1 {
308 let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
310 if (disconnect_count & 16) == 0 {
311 assert!(reestablish_1 == second_reestablish_1);
312 assert!(reestablish_2 == second_reestablish_2);
314 assert!(as_resp == second_as_resp);
315 assert!(bs_resp == second_bs_resp);
318 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
// No-disconnect path: A's released add/CS and RAA come as fresh events.
320 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
321 assert_eq!(events_4.len(), 2);
322 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
323 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
324 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
327 _ => panic!("Unexpected event"),
331 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
// Deliver A's pending HTLC add and commitment to B.
333 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
334 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
335 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
336 // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
337 check_added_monitors!(nodes[1], 1);
339 if disconnect_count & !disconnect_flags > 2 {
340 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
342 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
343 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
345 assert!(as_resp.2.is_none());
346 assert!(bs_resp.2.is_none());
349 let as_commitment_update;
350 let bs_second_commitment_update;
// Helper: A processes B's RAA and emits its (empty-update) commitment.
352 macro_rules! handle_bs_raa { () => {
353 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
354 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
355 assert!(as_commitment_update.update_add_htlcs.is_empty());
356 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
357 assert!(as_commitment_update.update_fail_htlcs.is_empty());
358 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
359 assert!(as_commitment_update.update_fee.is_none());
360 check_added_monitors!(nodes[0], 1);
// Helper: B processes A's initial RAA and emits its second commitment.
363 macro_rules! handle_initial_raa { () => {
364 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
365 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
366 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
367 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
368 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
369 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
370 assert!(bs_second_commitment_update.update_fee.is_none());
371 check_added_monitors!(nodes[1], 1);
// Bit 8 selects which RAA is processed first; each ordering interleaves
// optional disconnects and checks the retransmitted state is consistent.
374 if (disconnect_count & 8) == 0 {
377 if disconnect_count & !disconnect_flags > 3 {
378 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
380 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
381 assert!(bs_resp.1.is_none());
383 assert!(as_resp.2.unwrap() == as_commitment_update);
384 assert!(bs_resp.2.is_none());
386 assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
389 handle_initial_raa!();
391 if disconnect_count & !disconnect_flags > 4 {
392 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
394 assert!(as_resp.1.is_none());
395 assert!(bs_resp.1.is_none());
397 assert!(as_resp.2.unwrap() == as_commitment_update);
398 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
401 handle_initial_raa!();
403 if disconnect_count & !disconnect_flags > 3 {
404 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
406 assert!(as_resp.1.is_none());
407 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
409 assert!(as_resp.2.is_none());
410 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
412 assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
417 if disconnect_count & !disconnect_flags > 4 {
418 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
420 assert!(as_resp.1.is_none());
421 assert!(bs_resp.1.is_none());
423 assert!(as_resp.2.unwrap() == as_commitment_update);
424 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
// Final CS/RAA exchange to bring both commitments up to date.
428 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
429 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
430 // No commitment_signed so get_event_msg's assert(len == 1) passes
431 check_added_monitors!(nodes[0], 1);
433 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
434 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
435 // No commitment_signed so get_event_msg's assert(len == 1) passes
436 check_added_monitors!(nodes[1], 1);
438 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
439 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
440 check_added_monitors!(nodes[1], 1);
442 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
443 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
444 check_added_monitors!(nodes[0], 1);
// The second payment finally lands at nodes[1].
446 expect_pending_htlcs_forwardable!(nodes[1]);
448 let events_5 = nodes[1].node.get_and_clear_pending_events();
449 assert_eq!(events_5.len(), 1);
451 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
452 assert_eq!(payment_hash_2, *payment_hash);
453 assert_eq!(*payment_secret, None);
454 assert_eq!(amt, 1000000);
456 _ => panic!("Unexpected event"),
459 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
// Disconnect counts 0-5 with neither ordering bit (8) nor deferred-delivery
// bit (16) set.
463 fn test_monitor_temporary_update_fail_a() {
464 do_test_monitor_temporary_update_fail(0);
465 do_test_monitor_temporary_update_fail(1);
466 do_test_monitor_temporary_update_fail(2);
467 do_test_monitor_temporary_update_fail(3);
468 do_test_monitor_temporary_update_fail(4);
469 do_test_monitor_temporary_update_fail(5);
// Same scenarios with bit 8 set: swap the RAA delivery order in the final
// message exchange.
473 fn test_monitor_temporary_update_fail_b() {
474 do_test_monitor_temporary_update_fail(2 | 8);
475 do_test_monitor_temporary_update_fail(3 | 8);
476 do_test_monitor_temporary_update_fail(4 | 8);
477 do_test_monitor_temporary_update_fail(5 | 8);
// Bit 16 variants: B's initial fulfill/CS is delivered via the reconnect
// channel_reestablish path instead of immediately (requires at least one
// disconnect, hence counts start at 1).
481 fn test_monitor_temporary_update_fail_c() {
482 do_test_monitor_temporary_update_fail(1 | 16);
483 do_test_monitor_temporary_update_fail(2 | 16);
484 do_test_monitor_temporary_update_fail(3 | 16);
485 do_test_monitor_temporary_update_fail(2 | 8 | 16);
486 do_test_monitor_temporary_update_fail(3 | 8 | 16);
// Monitor update failure while handling an incoming commitment_signed, first
// on the receiver (nodes[1]) and then on the sender (nodes[0]); each side is
// unfrozen via channel_monitor_updated and the payment completes.
490 fn test_monitor_update_fail_cs() {
491 // Tests handling of a monitor update failure when processing an incoming commitment_signed
492 let chanmon_cfgs = create_chanmon_cfgs(2);
493 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
494 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
495 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
496 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
498 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
499 let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
500 nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
501 check_added_monitors!(nodes[0], 1);
503 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
504 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
// nodes[1]'s monitor update fails while processing the commitment_signed:
// it freezes without responding.
506 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
507 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
508 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
509 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
510 check_added_monitors!(nodes[1], 1);
511 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Unfreeze nodes[1]; it now emits both held responses (RAA then CS).
513 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
514 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
515 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
516 check_added_monitors!(nodes[1], 0);
517 let responses = nodes[1].node.get_and_clear_pending_msg_events();
518 assert_eq!(responses.len(), 2);
521 MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
522 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
523 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
524 check_added_monitors!(nodes[0], 1);
526 _ => panic!("Unexpected event"),
529 MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
530 assert!(updates.update_add_htlcs.is_empty());
531 assert!(updates.update_fulfill_htlcs.is_empty());
532 assert!(updates.update_fail_htlcs.is_empty());
533 assert!(updates.update_fail_malformed_htlcs.is_empty());
534 assert!(updates.update_fee.is_none());
535 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
// Now fail the monitor update on nodes[0] while it handles B's CS.
537 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
538 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
539 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
540 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
541 check_added_monitors!(nodes[0], 1);
542 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
544 _ => panic!("Unexpected event"),
// Unfreeze nodes[0]; its held RAA is released and delivered.
547 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
548 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
549 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
550 check_added_monitors!(nodes[0], 0);
552 let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
553 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
554 check_added_monitors!(nodes[1], 1);
556 expect_pending_htlcs_forwardable!(nodes[1]);
558 let events = nodes[1].node.get_and_clear_pending_events();
559 assert_eq!(events.len(), 1);
561 Event::PaymentReceived { payment_hash, payment_secret, amt } => {
562 assert_eq!(payment_hash, our_payment_hash);
563 assert_eq!(payment_secret, None);
564 assert_eq!(amt, 1000000);
566 _ => panic!("Unexpected event"),
569 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
// Monitor update failure while handling an incoming RAA: after the update is
// redone, no messages need rebroadcasting — the HTLC simply becomes
// forwardable and the payment is received.
573 fn test_monitor_update_fail_no_rebroadcast() {
574 // Tests handling of a monitor update failure when no message rebroadcasting on
575 // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
577 let chanmon_cfgs = create_chanmon_cfgs(2);
578 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
579 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
580 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
581 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
583 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
584 let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
585 nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
586 check_added_monitors!(nodes[0], 1);
588 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
589 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
// Run the dance but keep B's final RAA so we can deliver it under failure.
590 let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
592 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
593 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
594 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
595 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
596 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
597 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
598 check_added_monitors!(nodes[1], 1);
// After the monitor update completes, no messages are regenerated; the HTLC
// just becomes forwardable.
600 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
601 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
602 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
603 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
604 check_added_monitors!(nodes[1], 0);
605 expect_pending_htlcs_forwardable!(nodes[1]);
607 let events = nodes[1].node.get_and_clear_pending_events();
608 assert_eq!(events.len(), 1);
610 Event::PaymentReceived { payment_hash, .. } => {
611 assert_eq!(payment_hash, our_payment_hash);
613 _ => panic!("Unexpected event"),
616 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
// An RAA arrives while monitor updating is already marked failed: nodes[0]
// must queue the RAA response too, then release everything in order once
// channel_monitor_updated() is called, completing crossing payments in both
// directions.
620 fn test_monitor_update_raa_while_paused() {
621 // Tests handling of an RAA while monitor updating has already been marked failed.
622 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
623 let chanmon_cfgs = create_chanmon_cfgs(2);
624 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
625 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
626 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
627 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
629 send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
// Start crossing payments: one from each side, neither delivered yet.
631 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
632 let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
633 nodes[0].node.send_payment(&route, our_payment_hash_1, &None).unwrap();
634 check_added_monitors!(nodes[0], 1);
635 let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
637 let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
638 let (payment_preimage_2, our_payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
639 nodes[1].node.send_payment(&route, our_payment_hash_2, &None).unwrap();
640 check_added_monitors!(nodes[1], 1);
641 let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
643 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
644 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
645 check_added_monitors!(nodes[1], 1);
646 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
// nodes[0] freezes on the monitor update while handling B's add/CS...
648 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
649 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
650 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
651 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
652 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
653 check_added_monitors!(nodes[0], 1);
// ...and the subsequently-delivered RAA is also queued behind the failure.
655 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
656 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
657 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
658 check_added_monitors!(nodes[0], 1);
// Unfreeze; nodes[0] releases its RAA + CS and both sides catch up.
660 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
661 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
662 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
663 check_added_monitors!(nodes[0], 0);
665 let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
666 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
667 check_added_monitors!(nodes[1], 1);
668 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
670 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
671 check_added_monitors!(nodes[1], 1);
672 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
674 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
675 check_added_monitors!(nodes[0], 1);
676 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
678 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
679 check_added_monitors!(nodes[0], 1);
680 expect_pending_htlcs_forwardable!(nodes[0]);
681 expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);
683 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
684 check_added_monitors!(nodes[1], 1);
685 expect_pending_htlcs_forwardable!(nodes[1]);
686 expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
// Both crossing payments completed; claim them to settle the channel.
688 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
689 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
692 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
693 // Tests handling of a monitor update failure when processing an incoming RAA
// Topology: nodes[0] <-> nodes[1] <-> nodes[2] (chan_1, chan_2). A payment routed
// 0 -> 1 -> 2 is failed backwards by nodes[2]; nodes[1] is forced into a monitor-update
// failure while handling the resulting revoke_and_ack, and we verify it withholds all
// messages/events until the update is restored via channel_monitor_updated().
// When test_ignore_second_cs is set, an extra payment is routed backwards (2 -> 0) while
// nodes[1] is still failed, additionally exercising the held-back RAA/CS-generation path.
694 let chanmon_cfgs = create_chanmon_cfgs(3);
695 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
696 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
697 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
698 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
699 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
701 // Rebalance a bit so that we can send backwards from 2 to 1.
702 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
704 // Route a first payment that we'll fail backwards
705 let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
707 // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
708 assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
709 expect_pending_htlcs_forwardable!(nodes[2]);
710 check_added_monitors!(nodes[2], 1);
712 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
713 assert!(updates.update_add_htlcs.is_empty());
714 assert!(updates.update_fulfill_htlcs.is_empty());
715 assert_eq!(updates.update_fail_htlcs.len(), 1);
716 assert!(updates.update_fail_malformed_htlcs.is_empty());
717 assert!(updates.update_fee.is_none());
718 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
// Run the commitment dance but keep nodes[2]'s final revoke_and_ack in hand; it is
// delivered below only after monitor updating has been set to fail.
720 let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
721 check_added_monitors!(nodes[0], 0);
723 // While the second channel is AwaitingRAA, forward a second payment to get it into the
725 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
726 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
727 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
728 check_added_monitors!(nodes[0], 1);
730 let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
731 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
732 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
734 expect_pending_htlcs_forwardable!(nodes[1]);
735 check_added_monitors!(nodes[1], 0);
736 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
738 // Now fail monitor updating.
739 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
740 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
// RAA processing hit the failed monitor update: nodes[1] must generate no messages or
// events until channel_monitor_updated() is called.
741 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
742 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
743 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
744 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
745 check_added_monitors!(nodes[1], 1);
747 // Attempt to forward a third payment but fail due to the second channel being unavailable
750 let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
751 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
752 nodes[0].node.send_payment(&route, payment_hash_3, &None).unwrap();
753 check_added_monitors!(nodes[0], 1);
755 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
756 send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
757 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
758 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
759 check_added_monitors!(nodes[1], 0);
// nodes[1] immediately fails the third payment back to nodes[0] since chan_2 is still
// blocked on the failed monitor update; nodes[0] sees the channel as temporarily disabled.
761 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
762 assert_eq!(events_2.len(), 1);
763 match events_2.remove(0) {
764 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
765 assert_eq!(node_id, nodes[0].node.get_our_node_id());
766 assert!(updates.update_fulfill_htlcs.is_empty());
767 assert_eq!(updates.update_fail_htlcs.len(), 1);
768 assert!(updates.update_fail_malformed_htlcs.is_empty());
769 assert!(updates.update_add_htlcs.is_empty());
770 assert!(updates.update_fee.is_none());
772 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
773 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
775 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
776 assert_eq!(msg_events.len(), 1);
777 match msg_events[0] {
778 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
779 assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
780 assert_eq!(msg.contents.flags & 2, 2); // temp disabled
782 _ => panic!("Unexpected event"),
785 let events = nodes[0].node.get_and_clear_pending_events();
786 assert_eq!(events.len(), 1);
787 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
788 assert_eq!(payment_hash, payment_hash_3);
789 assert!(!rejected_by_dest);
790 } else { panic!("Unexpected event!"); }
792 _ => panic!("Unexpected event type!"),
795 let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
796 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
797 let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
798 let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
799 nodes[2].node.send_payment(&route, payment_hash_4, &None).unwrap();
800 check_added_monitors!(nodes[2], 1);
802 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
803 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
804 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
805 check_added_monitors!(nodes[1], 1);
806 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
807 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
808 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
809 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
810 (Some(payment_preimage_4), Some(payment_hash_4))
811 } else { (None, None) };
813 // Restore monitor updating, ensuring we immediately get a fail-back update and a
814 // update_add update.
815 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
816 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
817 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
818 check_added_monitors!(nodes[1], 0);
819 expect_pending_htlcs_forwardable!(nodes[1]);
820 check_added_monitors!(nodes[1], 1);
822 let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
823 if test_ignore_second_cs {
// Three events: the fail-back update for nodes[0], the forward to nodes[2], and the
// revoke_and_ack for nodes[2] that was held back while the monitor was failed.
824 assert_eq!(events_3.len(), 3);
826 assert_eq!(events_3.len(), 2);
829 // Note that the ordering of the events for different nodes is non-prescriptive, though the
830 // ordering of the two events that both go to nodes[2] have to stay in the same order.
831 let messages_a = match events_3.pop().unwrap() {
832 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
833 assert_eq!(node_id, nodes[0].node.get_our_node_id());
834 assert!(updates.update_fulfill_htlcs.is_empty());
835 assert_eq!(updates.update_fail_htlcs.len(), 1);
836 assert!(updates.update_fail_malformed_htlcs.is_empty());
837 assert!(updates.update_add_htlcs.is_empty());
838 assert!(updates.update_fee.is_none());
839 (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
841 _ => panic!("Unexpected event type!"),
843 let raa = if test_ignore_second_cs {
844 match events_3.remove(1) {
845 MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
846 assert_eq!(node_id, nodes[2].node.get_our_node_id());
849 _ => panic!("Unexpected event"),
852 let send_event_b = SendEvent::from_event(events_3.remove(0));
853 assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
855 // Now deliver the new messages...
857 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
858 commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
859 let events_4 = nodes[0].node.get_and_clear_pending_events();
860 assert_eq!(events_4.len(), 1);
861 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
862 assert_eq!(payment_hash, payment_hash_1);
863 assert!(rejected_by_dest);
864 } else { panic!("Unexpected event!"); }
866 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
867 if test_ignore_second_cs {
// Manual RAA/CS exchange between nodes[1] and nodes[2], including delivery of the RAA
// that was withheld during the monitor-update failure.
868 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
869 check_added_monitors!(nodes[2], 1);
870 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
871 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
872 check_added_monitors!(nodes[2], 1);
873 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
874 assert!(bs_cs.update_add_htlcs.is_empty());
875 assert!(bs_cs.update_fail_htlcs.is_empty());
876 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
877 assert!(bs_cs.update_fulfill_htlcs.is_empty());
878 assert!(bs_cs.update_fee.is_none());
880 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
881 check_added_monitors!(nodes[1], 1);
882 let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
883 assert!(as_cs.update_add_htlcs.is_empty());
884 assert!(as_cs.update_fail_htlcs.is_empty());
885 assert!(as_cs.update_fail_malformed_htlcs.is_empty());
886 assert!(as_cs.update_fulfill_htlcs.is_empty());
887 assert!(as_cs.update_fee.is_none());
889 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
890 check_added_monitors!(nodes[1], 1);
891 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
893 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
894 check_added_monitors!(nodes[2], 1);
895 let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
897 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
898 check_added_monitors!(nodes[2], 1);
899 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
901 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
902 check_added_monitors!(nodes[1], 1);
903 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
905 commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
908 expect_pending_htlcs_forwardable!(nodes[2]);
910 let events_6 = nodes[2].node.get_and_clear_pending_events();
911 assert_eq!(events_6.len(), 1);
913 Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
914 _ => panic!("Unexpected event"),
917 if test_ignore_second_cs {
// Deliver the backwards (2 -> 0) payment the rest of the way and claim it.
918 expect_pending_htlcs_forwardable!(nodes[1]);
919 check_added_monitors!(nodes[1], 1);
921 send_event = SendEvent::from_node(&nodes[1]);
922 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
923 assert_eq!(send_event.msgs.len(), 1);
924 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
925 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
927 expect_pending_htlcs_forwardable!(nodes[0]);
929 let events_9 = nodes[0].node.get_and_clear_pending_events();
930 assert_eq!(events_9.len(), 1);
932 Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
933 _ => panic!("Unexpected event"),
935 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
938 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
942 fn test_monitor_update_fail_raa() {
943 do_test_monitor_update_fail_raa(false);
944 do_test_monitor_update_fail_raa(true);
948 fn test_monitor_update_fail_reestablish() {
949 // Simple test for message retransmission after monitor update failure on
950 // channel_reestablish generating a monitor update (which comes from freeing holding cell
// Flow: a payment 0 -> 1 -> 2 is routed, then nodes[0]/nodes[1] disconnect. nodes[2]
// claims, so nodes[1] receives the fulfill while its chan_1 peer is offline (it sits in
// the holding cell). On reconnect, channel_reestablish frees the holding cell, which
// needs a monitor update -- forced to fail here. A second disconnect/reconnect must
// produce identical channel_reestablish messages, and once the monitor update is
// restored nodes[1] must forward the fulfill to nodes[0].
952 let chanmon_cfgs = create_chanmon_cfgs(3);
953 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
954 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
955 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
956 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
957 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
959 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
961 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
962 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
964 assert!(nodes[2].node.claim_funds(our_payment_preimage, &None, 1_000_000));
965 check_added_monitors!(nodes[2], 1);
966 let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
967 assert!(updates.update_add_htlcs.is_empty());
968 assert!(updates.update_fail_htlcs.is_empty());
969 assert!(updates.update_fail_malformed_htlcs.is_empty());
970 assert!(updates.update_fee.is_none());
971 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
972 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
973 check_added_monitors!(nodes[1], 1);
// nodes[1] cannot forward the fulfill to the disconnected nodes[0] yet.
974 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
975 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
977 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
978 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
979 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
981 let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
982 let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
984 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
986 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
// Freeing the holding-cell fulfill triggered the monitor update, which failed: nodes[1]
// must stay silent until the update is restored.
987 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
988 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
989 check_added_monitors!(nodes[1], 1);
991 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
992 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
994 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
995 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
// The reestablish messages after the second reconnect must match the first round exactly.
997 assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
998 assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
1000 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1002 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1003 check_added_monitors!(nodes[1], 0);
1004 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Restore monitor updating; nodes[1] should now release the held fulfill to nodes[0].
1006 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1007 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1008 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1009 check_added_monitors!(nodes[1], 0);
1011 updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1012 assert!(updates.update_add_htlcs.is_empty());
1013 assert!(updates.update_fail_htlcs.is_empty());
1014 assert!(updates.update_fail_malformed_htlcs.is_empty());
1015 assert!(updates.update_fee.is_none());
1016 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1017 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1018 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1020 let events = nodes[0].node.get_and_clear_pending_events();
1021 assert_eq!(events.len(), 1);
1023 Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
1024 _ => panic!("Unexpected event"),
1029 fn raa_no_response_awaiting_raa_state() {
1030 // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1031 // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1032 // in question (assuming it intends to respond with a CS after monitor updating is restored).
1033 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1034 let chanmon_cfgs = create_chanmon_cfgs(2);
1035 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1036 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1037 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1038 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1040 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1041 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1042 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1043 let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
1045 // Queue up two payments - one will be delivered right away, one immediately goes into the
1046 // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1047 // immediately after a CS. By setting failing the monitor update failure from the CS (which
1048 // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1049 // generation during RAA while in monitor-update-failed state.
1050 nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
1051 check_added_monitors!(nodes[0], 1);
// The second send lands in the holding cell: no new monitor update yet.
1052 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1053 check_added_monitors!(nodes[0], 0);
1055 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1056 assert_eq!(events.len(), 1);
1057 let payment_event = SendEvent::from_event(events.pop().unwrap());
1058 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1059 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1060 check_added_monitors!(nodes[1], 1);
// nodes[0]'s RAA frees the second (holding-cell) payment, producing a new update_add+CS.
1062 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1063 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1064 check_added_monitors!(nodes[0], 1);
1065 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1066 assert_eq!(events.len(), 1);
1067 let payment_event = SendEvent::from_event(events.pop().unwrap());
1069 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1070 check_added_monitors!(nodes[0], 1);
1071 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1073 // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1074 // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1075 // then restore channel monitor updates.
1076 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1077 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1078 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1079 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1080 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1081 check_added_monitors!(nodes[1], 1);
1083 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1084 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1085 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
1086 check_added_monitors!(nodes[1], 1);
// Restore monitor updating; nodes[1] now releases the held RAA + CS pair at once.
1088 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1089 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1090 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1091 // nodes[1] should be AwaitingRAA here!
1092 check_added_monitors!(nodes[1], 0);
1093 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1094 expect_pending_htlcs_forwardable!(nodes[1]);
1095 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1097 // We send a third payment here, which is somewhat of a redundant test, but the
1098 // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1099 // commitment transaction states) whereas here we can explicitly check for it.
1100 nodes[0].node.send_payment(&route, payment_hash_3, &None).unwrap();
1101 check_added_monitors!(nodes[0], 0);
1102 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1104 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1105 check_added_monitors!(nodes[0], 1);
1106 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1107 assert_eq!(events.len(), 1);
1108 let payment_event = SendEvent::from_event(events.pop().unwrap());
1110 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1111 check_added_monitors!(nodes[0], 1);
1112 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1114 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1115 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1116 check_added_monitors!(nodes[1], 1);
1117 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1119 // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1120 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1121 check_added_monitors!(nodes[1], 1);
1122 expect_pending_htlcs_forwardable!(nodes[1]);
1123 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1124 let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1126 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1127 check_added_monitors!(nodes[0], 1);
1129 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1130 check_added_monitors!(nodes[0], 1);
1131 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1133 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1134 check_added_monitors!(nodes[1], 1);
1135 expect_pending_htlcs_forwardable!(nodes[1]);
1136 expect_payment_received!(nodes[1], payment_hash_3, 1000000);
// All three payments arrived; claim them to leave the channel in a clean state.
1138 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1139 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1140 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
1144 fn claim_while_disconnected_monitor_update_fail() {
1145 // Test for claiming a payment while disconnected and then having the resulting
1146 // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1147 // contrived case for nodes with network instability.
1148 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1149 // code introduced a regression in this test (specifically, this caught a removal of the
1150 // channel_reestablish handling ensuring the order was sensical given the messages used).
1151 let chanmon_cfgs = create_chanmon_cfgs(2);
1152 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1153 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1154 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1155 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1157 // Forward a payment for B to claim
1158 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
1160 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1161 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
// Claim while disconnected: the fulfill cannot be sent and waits for reconnection.
1163 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1164 check_added_monitors!(nodes[1], 1);
1166 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1167 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1169 let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1170 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1172 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1173 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1175 // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
1177 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1179 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1180 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1181 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1182 check_added_monitors!(nodes[1], 1);
1183 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1185 // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1186 // the monitor still failed
1187 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1188 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1189 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1190 check_added_monitors!(nodes[0], 1);
1192 let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1193 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1194 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1195 check_added_monitors!(nodes[1], 1);
1196 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1197 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
1198 // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
1199 // until we've channel_monitor_update'd and updated for the new commitment transaction.
1201 // Now un-fail the monitor, which will result in B sending its original commitment update,
1202 // receiving the commitment update from A, and the resulting commitment dances.
1203 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1204 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1205 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1206 check_added_monitors!(nodes[1], 0);
1208 let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1209 assert_eq!(bs_msgs.len(), 2);
// First queued message: the fulfill-carrying commitment update for nodes[0].
1212 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1213 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1214 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1215 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1216 check_added_monitors!(nodes[0], 1);
1218 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1219 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1220 check_added_monitors!(nodes[1], 1);
1222 _ => panic!("Unexpected event"),
// Second queued message: the RAA responding to nodes[0]'s earlier commitment_signed.
1226 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1227 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1228 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1229 check_added_monitors!(nodes[0], 1);
1231 _ => panic!("Unexpected event"),
// Final CS/RAA crossing to bring both commitment transactions up to date.
1234 let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1236 let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1237 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1238 check_added_monitors!(nodes[0], 1);
1239 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1241 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1242 check_added_monitors!(nodes[1], 1);
1243 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1244 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1245 check_added_monitors!(nodes[1], 1);
1247 expect_pending_htlcs_forwardable!(nodes[1]);
1248 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1250 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1251 check_added_monitors!(nodes[0], 1);
// nodes[0] finally sees the original (pre-disconnect) claim succeed.
1253 let events = nodes[0].node.get_and_clear_pending_events();
1254 assert_eq!(events.len(), 1);
1256 Event::PaymentSent { ref payment_preimage } => {
1257 assert_eq!(*payment_preimage, payment_preimage_1);
1259 _ => panic!("Unexpected event"),
1262 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
// Test that B, having failed a monitor update while handling a commitment_signed (and thus
// having sent no response), survives a disconnect/reconnect: the channel_reestablish exchange
// completes without B regenerating messages, and restoring the monitor afterwards releases
// B's pending revoke_and_ack + commitment_signed pair.
1266 fn monitor_failed_no_reestablish_response() {
1267 // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1268 // response to a commitment_signed.
1269 // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1270 // debug_assert!() failure in channel_reestablish handling.
1271 let chanmon_cfgs = create_chanmon_cfgs(2);
1272 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1273 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1274 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1275 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1277 // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1279 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1280 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1281 nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
1282 check_added_monitors!(nodes[0], 1);
// Make B's next ChannelMonitor persist attempt fail, so its commitment_signed handling
// is frozen with no outbound messages (confirmed by the empty-event assert and log below).
1284 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1285 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1286 assert_eq!(events.len(), 1);
1287 let payment_event = SendEvent::from_event(events.pop().unwrap());
1288 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1289 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1290 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1291 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1292 check_added_monitors!(nodes[1], 1);
1294 // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1295 // is still failing to update monitors.
1296 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1297 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1299 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1300 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1302 let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1303 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1305 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1306 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
// Un-fail the monitor; channel_monitor_updated should now release B's held RAA and
// commitment_signed (fetched together via get_revoke_commit_msgs!).
1308 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1309 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1310 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1311 check_added_monitors!(nodes[1], 0);
1312 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1314 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1315 check_added_monitors!(nodes[0], 1);
1316 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1317 check_added_monitors!(nodes[0], 1);
1319 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1320 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1321 check_added_monitors!(nodes[1], 1);
// With the commitment dance complete B receives the payment; claim it to leave the
// channel in a clean state.
1323 expect_pending_htlcs_forwardable!(nodes[1]);
1324 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1326 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
// Test that a node frozen by a monitor-update failure on an RAA (which needs no response)
// correctly re-orders its eventual response when a commitment_signed arrives afterwards,
// so that the RAA is sent before the commitment_signed once the monitor is restored.
1330 fn first_message_on_recv_ordering() {
1331 // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1332 // messages, we're willing to flip the order of response messages if necessary in response to
1333 // a commitment_signed which needs to send an RAA first.
1334 // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1335 // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1336 // response. To do this, we start routing two payments, with the final RAA for the first being
1337 // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1338 // have no pending response but will want to send a RAA/CS (with the updates for the second
1339 // payment applied).
1340 // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1341 let chanmon_cfgs = create_chanmon_cfgs(2);
1342 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1343 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1344 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1345 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1347 // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1348 // can deliver it and fail the monitor update.
1349 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1350 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1351 nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
1352 check_added_monitors!(nodes[0], 1);
1354 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1355 assert_eq!(events.len(), 1);
1356 let payment_event = SendEvent::from_event(events.pop().unwrap());
1357 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1358 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1359 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1360 check_added_monitors!(nodes[1], 1);
1361 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1363 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1364 check_added_monitors!(nodes[0], 1);
1365 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1366 check_added_monitors!(nodes[0], 1);
// as_raa is deliberately held back here, leaving B in AwaitingRAA for payment 1.
1368 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1370 // Route the second payment, generating an update_add_htlc/commitment_signed
1371 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1372 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1373 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1374 check_added_monitors!(nodes[0], 1);
1375 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1376 assert_eq!(events.len(), 1);
1377 let payment_event = SendEvent::from_event(events.pop().unwrap());
1378 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
// Fail B's next monitor update before the held-back RAA is delivered.
1380 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1382 // Deliver the final RAA for the first payment, which does not require a response. RAAs
1383 // generally require a commitment_signed, so the fact that we're expecting an opposite response
1384 // to the next message also tests resetting the delivery order.
1385 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1386 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1387 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1388 check_added_monitors!(nodes[1], 1);
1390 // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1391 // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1392 // appropriate HTLC acceptance).
1393 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1394 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1395 check_added_monitors!(nodes[1], 1);
1396 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1397 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
// Restore the monitor; B should now produce its RAA + CS response pair and also
// complete processing of the first payment.
1399 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1400 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1401 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1402 check_added_monitors!(nodes[1], 0);
1404 expect_pending_htlcs_forwardable!(nodes[1]);
1405 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1407 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1408 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1409 check_added_monitors!(nodes[0], 1);
1410 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1411 check_added_monitors!(nodes[0], 1);
1413 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1414 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1415 check_added_monitors!(nodes[1], 1);
// Second payment completes; claim both to leave the channel clean.
1417 expect_pending_htlcs_forwardable!(nodes[1]);
1418 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1420 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1421 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
// Test that a claim_funds whose monitor update fails is treated as a best-effort success
// (logged, no messages generated), that a forward over the now-paused channel is failed
// back to the sender, and that restoring the monitor finally releases the fulfill.
1425 fn test_monitor_update_fail_claim() {
1426 // Basic test for monitor update failures when processing claim_funds calls.
1427 // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1428 // update to claim the payment. We then send a payment C->B->A, making the forward of this
1429 // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
1430 // updating and claim the payment on B.
1431 let chanmon_cfgs = create_chanmon_cfgs(3);
1432 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1433 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1434 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1435 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
1436 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
1438 // Rebalance a bit so that we can send backwards from 3 to 2.
1439 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
1441 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
// Fail the monitor update triggered by claim_funds; per the log assertion further down,
// the claim is treated as a success internally while the 0<->1 channel stays paused.
1443 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1444 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1445 check_added_monitors!(nodes[1], 1);
1447 let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1448 let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1449 nodes[2].node.send_payment(&route, payment_hash_2, &None).unwrap();
1450 check_added_monitors!(nodes[2], 1);
1452 // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1453 // paused, so forward shouldn't succeed until we call channel_monitor_updated().
1454 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1456 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1457 assert_eq!(events.len(), 1);
1458 let payment_event = SendEvent::from_event(events.pop().unwrap());
1459 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1460 let events = nodes[1].node.get_and_clear_pending_msg_events();
1461 assert_eq!(events.len(), 0);
1462 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
1463 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
// B fails the C->A HTLC back to C since the 0<->1 channel is still paused.
1465 let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1466 nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
1467 commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
1469 let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
1470 assert_eq!(msg_events.len(), 1);
1471 match msg_events[0] {
1472 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
1473 assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
1474 assert_eq!(msg.contents.flags & 2, 2); // temp disabled
1476 _ => panic!("Unexpected event"),
1479 let events = nodes[2].node.get_and_clear_pending_events();
1480 assert_eq!(events.len(), 1);
1481 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1482 assert_eq!(payment_hash, payment_hash_2);
1483 assert!(!rejected_by_dest);
1484 } else { panic!("Unexpected event!"); }
1486 // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1487 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1488 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1489 check_added_monitors!(nodes[1], 0);
// The queued fulfill for payment 1 is finally delivered to A, which sees PaymentSent.
1491 let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1492 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1493 commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1495 let events = nodes[0].node.get_and_clear_pending_events();
1496 assert_eq!(events.len(), 1);
1497 if let Event::PaymentSent { payment_preimage, .. } = events[0] {
1498 assert_eq!(payment_preimage, payment_preimage_1);
1499 } else { panic!("Unexpected event!"); }
// Test that a monitor update failure while processing pending HTLC forwards (one back-fail
// and one forward batched together) holds both until the monitor is restored, at which
// point a single commitment update carrying both the fail and the add is released.
1503 fn test_monitor_update_on_pending_forwards() {
1504 // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1505 // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1506 // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1507 // from C to A will be pending a forward to A.
1508 let chanmon_cfgs = create_chanmon_cfgs(3);
1509 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1510 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1511 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1512 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
1513 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
1515 // Rebalance a bit so that we can send backwards from 3 to 1.
1516 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
// Payment 1 (A->C) is failed by C, leaving B with a pending back-fail towards A.
1518 let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1519 assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
1520 expect_pending_htlcs_forwardable!(nodes[2]);
1521 check_added_monitors!(nodes[2], 1);
1523 let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1524 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1525 commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1526 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Payment 2 (C->A) reaches B, leaving B with a pending forward towards A.
1528 let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1529 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1530 nodes[2].node.send_payment(&route, payment_hash_2, &None).unwrap();
1531 check_added_monitors!(nodes[2], 1);
1533 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1534 assert_eq!(events.len(), 1);
1535 let payment_event = SendEvent::from_event(events.pop().unwrap());
1536 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1537 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
// Fail B's monitor update while it processes the batched fail-back + forward; nothing
// is sent until the monitor is restored.
1539 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1540 expect_pending_htlcs_forwardable!(nodes[1]);
1541 check_added_monitors!(nodes[1], 1);
1542 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1543 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1545 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1546 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1547 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1548 check_added_monitors!(nodes[1], 0);
// After restoring, B's single commitment update to A carries both the fail (payment 1)
// and the add (payment 2).
1550 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1551 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1552 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1553 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1555 let events = nodes[0].node.get_and_clear_pending_events();
1556 assert_eq!(events.len(), 2);
1557 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1558 assert_eq!(payment_hash, payment_hash_1);
1559 assert!(rejected_by_dest);
1560 } else { panic!("Unexpected event!"); }
1562 Event::PendingHTLCsForwardable { .. } => { },
1563 _ => panic!("Unexpected event"),
1565 nodes[0].node.process_pending_htlc_forwards();
1566 expect_payment_received!(nodes[0], payment_hash_2, 1000000);
1568 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
// Test a claim_funds that both fails its monitor update and would generate no message
// anyway (the channel is AwaitingRAA): restoring the monitor must not emit anything
// early, and the fulfill is only released after the pending RAA is delivered.
1572 fn monitor_update_claim_fail_no_response() {
1573 // Test for claim_funds resulting in both a monitor update failure and no message response (due
1574 // to channel being AwaitingRAA).
1575 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1577 let chanmon_cfgs = create_chanmon_cfgs(2);
1578 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1579 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1580 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1581 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
1583 // Forward a payment for B to claim
1584 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
1586 // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1587 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
1588 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1589 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1590 check_added_monitors!(nodes[0], 1);
1592 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1593 assert_eq!(events.len(), 1);
1594 let payment_event = SendEvent::from_event(events.pop().unwrap());
1595 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
// The dance's trailing flags hold back A's final RAA, returned here as as_raa.
1596 let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
// claim_funds hits the failed monitor update: logged as a temporary failure treated as
// success, and (since B is AwaitingRAA) no messages are generated.
1598 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1599 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1600 check_added_monitors!(nodes[1], 1);
1601 let events = nodes[1].node.get_and_clear_pending_msg_events();
1602 assert_eq!(events.len(), 0);
1603 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
// Restoring the monitor must still produce no messages — B remains AwaitingRAA.
1605 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1606 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1607 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1608 check_added_monitors!(nodes[1], 0);
1609 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Delivering the held RAA unblocks B: payment 2 is received and the fulfill for
// payment 1 goes out to A.
1611 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1612 check_added_monitors!(nodes[1], 1);
1613 expect_pending_htlcs_forwardable!(nodes[1]);
1614 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1616 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1617 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1618 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1620 let events = nodes[0].node.get_and_clear_pending_events();
1621 assert_eq!(events.len(), 1);
1623 Event::PaymentSent { ref payment_preimage } => {
1624 assert_eq!(*payment_preimage, payment_preimage_1);
1626 _ => panic!("Unexpected event"),
1629 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
// Parameterized test driver: exercises monitor-update failures at various points of the
// funding flow (on funding_transaction_generated and/or on funding_signed, on either
// side), with optional restoration in between, and with either node confirming first.
1632 // Note that restore_between_fails with !fail_on_generate is useless
1633 // Also note that !fail_on_generate && !fail_on_signed is useless
1634 // Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
1635 // confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
1636 // restore_b_before_conf has no meaning if !confirm_a_first
1637 fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
1638 // Test that if the monitor update generated by funding_transaction_generated fails we continue
1639 // the channel setup happily after the update is restored.
1640 let chanmon_cfgs = create_chanmon_cfgs(2);
1641 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1642 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1643 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Open-channel handshake up to the point where A builds the funding transaction;
// user_channel_id 43 here is asserted against in the FundingBroadcastSafe event below.
1645 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1646 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::supported(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1647 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::supported(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1649 let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
// Optionally fail A's monitor update produced by funding_transaction_generated.
1651 if fail_on_generate {
1652 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1654 nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
1655 check_added_monitors!(nodes[0], 1);
// B's monitor update for handle_funding_created is always failed in this test.
1657 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1658 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1659 let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1660 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1661 check_added_monitors!(nodes[1], 1);
// Optionally un-fail and complete A's first monitor update before funding_signed arrives;
// doing so must generate no events or messages yet.
1663 if restore_between_fails {
1664 assert!(fail_on_generate);
1665 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1666 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1667 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1668 check_added_monitors!(nodes[0], 0);
1669 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1670 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// A's monitor is set to fail again ahead of funding_signed (branch selection depends on
// fail_on_signed/restore_between_fails; the asserts below pin the legal combinations).
1674 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1676 assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
1677 assert!(fail_on_generate); // Somebody has to fail
1679 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1680 if fail_on_signed || !restore_between_fails {
1681 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1682 if fail_on_generate && !restore_between_fails {
1683 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented funding_signed from allowing funding broadcast".to_string(), 1);
1684 check_added_monitors!(nodes[0], 1);
1686 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1687 check_added_monitors!(nodes[0], 1);
1689 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1690 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1691 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1692 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1693 check_added_monitors!(nodes[0], 0);
1695 check_added_monitors!(nodes[0], 1);
// Once A's monitor is good, the FundingBroadcastSafe event fires with the expected txo.
1698 let events = nodes[0].node.get_and_clear_pending_events();
1699 assert_eq!(events.len(), 1);
1701 Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
1702 assert_eq!(user_channel_id, 43);
1703 assert_eq!(*funding_txo, funding_output);
1705 _ => panic!("Unexpected event"),
// Confirm the funding tx on one or both chains; B (whose monitor is still failed) must
// not produce messages until its monitor is restored.
1708 if confirm_a_first {
1709 confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
1710 nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
1712 assert!(!restore_b_before_conf);
1713 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1714 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1717 // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
1718 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1719 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1720 reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1721 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1722 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1724 if !restore_b_before_conf {
1725 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1726 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1727 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
// Restore B's monitor; channel setup can now complete in whichever confirmation order
// was selected, producing the announcement and both directions' channel_updates.
1730 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1731 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1732 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1733 check_added_monitors!(nodes[1], 0);
1735 let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1736 nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
1738 confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
1739 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1740 (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
1742 if restore_b_before_conf {
1743 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1745 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1746 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
// Feed the announcement and both channel_updates to every node's router.
1748 for node in nodes.iter() {
1749 assert!(node.router.handle_channel_announcement(&announcement).unwrap());
1750 node.router.handle_channel_update(&as_update).unwrap();
1751 node.router.handle_channel_update(&bs_update).unwrap();
// Finally prove the channel is usable end-to-end, then close it cooperatively.
1754 send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
1755 close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
// Runs do_during_funding_monitor_fail over the meaningful combinations of its five bool
// parameters; the notes above its definition explain which combinations are excluded as
// redundant or impossible.
1759 fn during_funding_monitor_fail() {
1760 do_during_funding_monitor_fail(false, false, true, true, true);
1761 do_during_funding_monitor_fail(true, false, true, false, false);
1762 do_during_funding_monitor_fail(true, true, true, true, false);
1763 do_during_funding_monitor_fail(true, true, false, false, false);