1 //! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
//! monitor updates.
3 //! There are a bunch of these as their handling is relatively error-prone so they are split out
4 //! here. See also the chanmon_fail_consistency fuzz test.
6 use chain::transaction::OutPoint;
7 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
8 use ln::channelmonitor::ChannelMonitorUpdateErr;
9 use ln::features::InitFeatures;
11 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
12 use routing::router::get_route;
13 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
14 use util::errors::APIError;
16 use bitcoin::hashes::sha256::Hash as Sha256;
17 use bitcoin::hashes::Hash;
19 use ln::functional_test_utils::*;
// NOTE(review): this chunk is a lossy extraction — the embedded source line
// numbers skip (e.g. 32 -> 34), so the `#[test]` attribute, the `match events_1[N] {`
// openers, and closing braces are missing from this view. Confirm against upstream
// before treating the braces here as authoritative.
//
// Tests that a PermanentFailure monitor-update error on a payment send
// force-closes the channel: the send fails with ChannelUnavailable, the node
// broadcasts a channel update and sends an error to its peer, and the channel
// disappears from list_channels().
25 fn test_simple_monitor_permanent_update_fail() {
26 // Test that we handle a simple permanent monitor update failure
27 let chanmon_cfgs = create_chanmon_cfgs(2);
28 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
29 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
30 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
31 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
32 let logger = Arc::new(test_utils::TestLogger::new());
34 let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
// Arm the test monitor so the NEXT update returns PermanentFailure.
36 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
37 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
38 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
// PermanentFailure surfaces to the sender as ChannelUnavailable (channel closed).
39 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {});
40 check_added_monitors!(nodes[0], 2);
42 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
43 assert_eq!(events_1.len(), 2);
// First event: the close is announced to the network.
45 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
46 _ => panic!("Unexpected event"),
// Second event: an error message destined for the counterparty.
49 MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
50 _ => panic!("Unexpected event"),
53 // TODO: Once we hit the chain with the failure transaction we should check that we get a
54 // PaymentFailed event
// The channel must be gone after the force close.
56 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Shared body for test_simple_monitor_temporary_update_fail: a payment send hits
// a TemporaryFailure monitor update, which freezes (but does not close) the
// channel; after the monitor update is completed the payment goes through.
// If `disconnect` is set, the peers disconnect/reconnect while frozen.
//
// NOTE(review): lossy extraction — embedded line numbers skip, so the `#[test]`
// scaffolding, `if disconnect {` guards (around original lines 85-87 and 132-134),
// `match` openers, and closing braces are missing from this view.
59 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
60 // Test that we can recover from a simple temporary monitor update failure optionally with
61 // a disconnect in between
62 let chanmon_cfgs = create_chanmon_cfgs(2);
63 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
64 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
65 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
66 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
67 let logger = Arc::new(test_utils::TestLogger::new());
69 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
// Arm the test monitor so the NEXT update returns TemporaryFailure.
71 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
74 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
75 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
// TemporaryFailure surfaces as MonitorUpdateFailed; the channel freezes rather
// than closing.
76 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), false, APIError::MonitorUpdateFailed, {});
77 check_added_monitors!(nodes[0], 1);
// While frozen, no events or messages may be generated and the channel stays open.
80 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
81 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
82 assert_eq!(nodes[0].node.list_channels().len(), 1);
// (Original source guards this disconnect/reconnect with `if disconnect` —
// dropped by the extraction.)
85 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
86 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
87 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
// Complete the pending monitor update; the frozen send should now be released.
90 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
91 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
92 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
93 check_added_monitors!(nodes[0], 0);
95 let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
96 assert_eq!(events_2.len(), 1);
97 let payment_event = SendEvent::from_event(events_2.pop().unwrap());
98 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
99 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
100 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
102 expect_pending_htlcs_forwardable!(nodes[1]);
// nodes[1] should now see the payment arrive.
104 let events_3 = nodes[1].node.get_and_clear_pending_events();
105 assert_eq!(events_3.len(), 1);
107 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
108 assert_eq!(payment_hash_1, *payment_hash);
109 assert_eq!(*payment_secret, None);
110 assert_eq!(amt, 1000000);
112 _ => panic!("Unexpected event"),
115 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
117 // Now set it to failed again...
118 let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
120 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
121 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
122 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
123 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
124 check_added_monitors!(nodes[0], 1);
127 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
128 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
129 assert_eq!(nodes[0].node.list_channels().len(), 1);
// (Also `if disconnect`-guarded in the original — guard dropped by extraction.)
132 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
133 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
134 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
137 // ...and make sure we can force-close a frozen channel
138 nodes[0].node.force_close_channel(&channel_id);
139 check_added_monitors!(nodes[0], 1);
140 check_closed_broadcast!(nodes[0], false);
142 // TODO: Once we hit the chain with the failure transaction we should check that we get a
143 // PaymentFailed event
145 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Driver: runs the temporary-failure scenario both without and with a peer
// disconnect while the channel is frozen.
// NOTE(review): the `#[test]` attribute and closing `}` were dropped by the
// extraction (embedded numbering skips 148 and 152).
149 fn test_simple_monitor_temporary_update_fail() {
150 do_test_simple_monitor_temporary_update_fail(false);
151 do_test_simple_monitor_temporary_update_fail(true);
// Parameterized monitor-failure scenario with an in-flight claim racing a frozen
// send. `disconnect_count` is a bitfield: bits 8 and 16 select message-ordering
// variants (see the comment block inside), while the remaining low bits select
// how many disconnect/reconnect cycles are interleaved.
//
// NOTE(review): lossy extraction — embedded line numbers skip throughout, so
// `#[test]`-adjacent scaffolding, `match …[0] {` openers, `} else {` lines, and
// many closing braces are absent from this view; the control-flow nesting below
// cannot be fully reconstructed from this chunk alone. Confirm against upstream.
154 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
// Bits 8 and 16 are "flags", not disconnect counts; masked off when counting.
155 let disconnect_flags = 8 | 16;
157 // Test that we can recover from a temporary monitor update failure with some in-flight
158 // HTLCs going on at the same time potentially with some disconnection thrown in.
159 // * First we route a payment, then get a temporary monitor update failure when trying to
160 // route a second payment. We then claim the first payment.
161 // * If disconnect_count is set, we will disconnect at this point (which is likely as
162 // TemporaryFailure likely indicates net disconnect which resulted in failing to update
163 // the ChannelMonitor on a watchtower).
164 // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
165 // immediately, otherwise we wait disconnect and deliver them via the reconnect
166 // channel_reestablish processing (ie disconnect_count & 16 makes no sense if
167 // disconnect_count & !disconnect_flags is 0).
168 // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
169 // through message sending, potentially disconnect/reconnecting multiple times based on
170 // disconnect_count, to get the update_fulfill_htlc through.
171 // * We then walk through more message exchanges to get the original update_add_htlc
172 // through, swapping message ordering based on disconnect_count & 8 and optionally
173 // disconnect/reconnecting based on disconnect_count.
174 let chanmon_cfgs = create_chanmon_cfgs(2);
175 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
176 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
177 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
178 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
179 let logger = Arc::new(test_utils::TestLogger::new());
181 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
183 // Now try to send a second payment which will fail to send
184 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
186 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
187 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
188 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
189 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
190 check_added_monitors!(nodes[0], 1);
// Channel is frozen: no events, no messages, still listed as open.
193 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
194 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
195 assert_eq!(nodes[0].node.list_channels().len(), 1);
197 // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
198 // but nodes[0] won't respond since it is frozen.
199 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
200 check_added_monitors!(nodes[1], 1);
201 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
202 assert_eq!(events_2.len(), 1);
203 let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
204 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
205 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
206 assert!(update_add_htlcs.is_empty());
207 assert_eq!(update_fulfill_htlcs.len(), 1);
208 assert!(update_fail_htlcs.is_empty());
209 assert!(update_fail_malformed_htlcs.is_empty());
210 assert!(update_fee.is_none());
// Without flag 16, deliver the fulfill/CS immediately; the frozen nodes[0] must
// accept the fulfill but be unable to respond with an RAA.
212 if (disconnect_count & 16) == 0 {
213 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
214 let events_3 = nodes[0].node.get_and_clear_pending_events();
215 assert_eq!(events_3.len(), 1);
217 Event::PaymentSent { ref payment_preimage } => {
218 assert_eq!(*payment_preimage, payment_preimage_1);
220 _ => panic!("Unexpected event"),
223 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
224 check_added_monitors!(nodes[0], 1);
225 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
226 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
229 (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
231 _ => panic!("Unexpected event"),
// Disconnect once if any disconnect cycles were requested (flags masked off).
234 if disconnect_count & !disconnect_flags > 0 {
235 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
236 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
239 // Now fix monitor updating...
240 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
241 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
242 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
243 check_added_monitors!(nodes[0], 0);
// Helper: full disconnect + reconnect handshake, returning both reestablish
// messages and both sides' responses for later equality checks.
245 macro_rules! disconnect_reconnect_peers { () => { {
246 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
247 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
249 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
250 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
251 assert_eq!(reestablish_1.len(), 1);
252 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
253 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
254 assert_eq!(reestablish_2.len(), 1);
256 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
257 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
258 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
259 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
261 assert!(as_resp.0.is_none());
262 assert!(bs_resp.0.is_none());
264 (reestablish_1, reestablish_2, as_resp, bs_resp)
// If we disconnected above, the unfrozen messages arrive via channel_reestablish
// processing instead of being rebroadcast directly.
267 let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
268 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
269 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
271 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
272 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
273 assert_eq!(reestablish_1.len(), 1);
274 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
275 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
276 assert_eq!(reestablish_2.len(), 1);
278 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
279 check_added_monitors!(nodes[0], 0);
280 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
281 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
282 check_added_monitors!(nodes[1], 0);
283 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
285 assert!(as_resp.0.is_none());
286 assert!(bs_resp.0.is_none());
288 assert!(bs_resp.1.is_none());
// Flag 16 decides whether nodes[1] must re-send its fulfill/CS on reconnect.
289 if (disconnect_count & 16) == 0 {
290 assert!(bs_resp.2.is_none());
292 assert!(as_resp.1.is_some());
293 assert!(as_resp.2.is_some());
294 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
296 assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
297 assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
298 assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
299 assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
// The re-sent update must be byte-identical to the original fulfill/CS pair.
300 assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
301 assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
303 assert!(as_resp.1.is_none());
305 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
306 let events_3 = nodes[0].node.get_and_clear_pending_events();
307 assert_eq!(events_3.len(), 1);
309 Event::PaymentSent { ref payment_preimage } => {
310 assert_eq!(*payment_preimage, payment_preimage_1);
312 _ => panic!("Unexpected event"),
315 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
316 let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
317 // No commitment_signed so get_event_msg's assert(len == 1) passes
318 check_added_monitors!(nodes[0], 1);
320 as_resp.1 = Some(as_resp_raa);
// A second disconnect/reconnect must reproduce identical messages (retransmission
// is deterministic).
324 if disconnect_count & !disconnect_flags > 1 {
325 let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
327 if (disconnect_count & 16) == 0 {
328 assert!(reestablish_1 == second_reestablish_1);
329 assert!(reestablish_2 == second_reestablish_2);
331 assert!(as_resp == second_as_resp);
332 assert!(bs_resp == second_bs_resp);
335 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
// No-disconnect path: the unfrozen send + RAA appear directly as msg events.
337 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
338 assert_eq!(events_4.len(), 2);
339 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
340 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
341 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
344 _ => panic!("Unexpected event"),
348 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
350 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
351 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
352 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
353 // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
354 check_added_monitors!(nodes[1], 1);
356 if disconnect_count & !disconnect_flags > 2 {
357 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
359 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
360 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
362 assert!(as_resp.2.is_none());
363 assert!(bs_resp.2.is_none());
366 let as_commitment_update;
367 let bs_second_commitment_update;
// Deliver nodes[1]'s RAA to nodes[0]; nodes[0] then re-sends its held-up add.
369 macro_rules! handle_bs_raa { () => {
370 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
371 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
372 assert!(as_commitment_update.update_add_htlcs.is_empty());
373 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
374 assert!(as_commitment_update.update_fail_htlcs.is_empty());
375 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
376 assert!(as_commitment_update.update_fee.is_none());
377 check_added_monitors!(nodes[0], 1);
// Deliver nodes[0]'s initial RAA to nodes[1]; nodes[1] responds with a new CS.
380 macro_rules! handle_initial_raa { () => {
381 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
382 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
383 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
384 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
385 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
386 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
387 assert!(bs_second_commitment_update.update_fee.is_none());
388 check_added_monitors!(nodes[1], 1);
// Flag 8 swaps which RAA is delivered first; both orders must converge.
391 if (disconnect_count & 8) == 0 {
394 if disconnect_count & !disconnect_flags > 3 {
395 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
397 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
398 assert!(bs_resp.1.is_none());
400 assert!(as_resp.2.unwrap() == as_commitment_update);
401 assert!(bs_resp.2.is_none());
403 assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
406 handle_initial_raa!();
408 if disconnect_count & !disconnect_flags > 4 {
409 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
411 assert!(as_resp.1.is_none());
412 assert!(bs_resp.1.is_none());
414 assert!(as_resp.2.unwrap() == as_commitment_update);
415 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
// (Else-branch of the flag-8 swap; `} else {` line dropped by extraction.)
418 handle_initial_raa!();
420 if disconnect_count & !disconnect_flags > 3 {
421 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
423 assert!(as_resp.1.is_none());
424 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
426 assert!(as_resp.2.is_none());
427 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
429 assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
434 if disconnect_count & !disconnect_flags > 4 {
435 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
437 assert!(as_resp.1.is_none());
438 assert!(bs_resp.1.is_none());
440 assert!(as_resp.2.unwrap() == as_commitment_update);
441 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
// Final CS/RAA exchange in both directions to settle both commitments.
445 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
446 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
447 // No commitment_signed so get_event_msg's assert(len == 1) passes
448 check_added_monitors!(nodes[0], 1);
450 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
451 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
452 // No commitment_signed so get_event_msg's assert(len == 1) passes
453 check_added_monitors!(nodes[1], 1);
455 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
456 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
457 check_added_monitors!(nodes[1], 1);
459 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
460 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
461 check_added_monitors!(nodes[0], 1);
463 expect_pending_htlcs_forwardable!(nodes[1]);
// The originally-frozen second payment finally arrives at nodes[1].
465 let events_5 = nodes[1].node.get_and_clear_pending_events();
466 assert_eq!(events_5.len(), 1);
468 Event::PaymentReceived { ref payment_hash, ref payment_secret, amt } => {
469 assert_eq!(payment_hash_2, *payment_hash);
470 assert_eq!(*payment_secret, None);
471 assert_eq!(amt, 1000000);
473 _ => panic!("Unexpected event"),
476 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
// Driver batch A: disconnect counts 0-5 with no ordering flags.
// NOTE(review): `#[test]` attribute and closing `}` dropped by the extraction.
480 fn test_monitor_temporary_update_fail_a() {
481 do_test_monitor_temporary_update_fail(0);
482 do_test_monitor_temporary_update_fail(1);
483 do_test_monitor_temporary_update_fail(2);
484 do_test_monitor_temporary_update_fail(3);
485 do_test_monitor_temporary_update_fail(4);
486 do_test_monitor_temporary_update_fail(5);
// Driver batch B: flag 8 (swapped RAA delivery order) with 2-5 disconnects.
// NOTE(review): `#[test]` attribute and closing `}` dropped by the extraction.
490 fn test_monitor_temporary_update_fail_b() {
491 do_test_monitor_temporary_update_fail(2 | 8);
492 do_test_monitor_temporary_update_fail(3 | 8);
493 do_test_monitor_temporary_update_fail(4 | 8);
494 do_test_monitor_temporary_update_fail(5 | 8);
// Driver batch C: flag 16 (fulfill/CS delivered via reestablish), with and
// without flag 8; requires at least one disconnect (see the bitfield comment in
// do_test_monitor_temporary_update_fail).
// NOTE(review): `#[test]` attribute and closing `}` dropped by the extraction.
498 fn test_monitor_temporary_update_fail_c() {
499 do_test_monitor_temporary_update_fail(1 | 16);
500 do_test_monitor_temporary_update_fail(2 | 16);
501 do_test_monitor_temporary_update_fail(3 | 16);
502 do_test_monitor_temporary_update_fail(2 | 8 | 16);
503 do_test_monitor_temporary_update_fail(3 | 8 | 16);
// Tests a TemporaryFailure raised while PROCESSING an incoming commitment_signed
// (first on nodes[1], then on nodes[0] for the response CS): the receiving node
// must stay silent until channel_monitor_updated, then emit its RAA/CS responses.
//
// NOTE(review): lossy extraction — `#[test]`, `match responses[N] {` openers and
// several closing braces are missing from this view (embedded numbering skips).
507 fn test_monitor_update_fail_cs() {
508 // Tests handling of a monitor update failure when processing an incoming commitment_signed
509 let chanmon_cfgs = create_chanmon_cfgs(2);
510 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
511 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
512 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
513 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
514 let logger = Arc::new(test_utils::TestLogger::new());
516 let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
518 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
519 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
520 nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
521 check_added_monitors!(nodes[0], 1);
524 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
525 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
// Fail the monitor update triggered by the incoming CS on nodes[1]; it must log
// and hold all responses.
527 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
528 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
529 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
530 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
531 check_added_monitors!(nodes[1], 1);
532 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Complete the update; nodes[1] now releases its RAA + CS pair.
534 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
535 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
536 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
537 check_added_monitors!(nodes[1], 0);
538 let responses = nodes[1].node.get_and_clear_pending_msg_events();
539 assert_eq!(responses.len(), 2);
542 MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
543 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
544 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
545 check_added_monitors!(nodes[0], 1);
547 _ => panic!("Unexpected event"),
550 MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
551 assert!(updates.update_add_htlcs.is_empty());
552 assert!(updates.update_fulfill_htlcs.is_empty());
553 assert!(updates.update_fail_htlcs.is_empty());
554 assert!(updates.update_fail_malformed_htlcs.is_empty());
555 assert!(updates.update_fee.is_none());
556 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
// Now fail the mirror-image update on nodes[0] while it processes the CS back.
558 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
559 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
560 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
561 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
562 check_added_monitors!(nodes[0], 1);
563 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
565 _ => panic!("Unexpected event"),
568 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
569 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
570 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
571 check_added_monitors!(nodes[0], 0);
573 let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
574 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
575 check_added_monitors!(nodes[1], 1);
577 expect_pending_htlcs_forwardable!(nodes[1]);
// Despite both interruptions, the payment lands intact on nodes[1].
579 let events = nodes[1].node.get_and_clear_pending_events();
580 assert_eq!(events.len(), 1);
582 Event::PaymentReceived { payment_hash, payment_secret, amt } => {
583 assert_eq!(payment_hash, our_payment_hash);
584 assert_eq!(payment_secret, None);
585 assert_eq!(amt, 1000000);
587 _ => panic!("Unexpected event"),
590 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
// Tests a TemporaryFailure while processing an incoming RAA: completing the
// monitor update must NOT rebroadcast any messages (the RAA produced no
// counterparty-visible response), only release the pending HTLC forward.
//
// NOTE(review): lossy extraction — `#[test]`, the `match events[0] {` opener and
// some closing braces are missing from this view (embedded numbering skips).
594 fn test_monitor_update_fail_no_rebroadcast() {
595 // Tests handling of a monitor update failure when no message rebroadcasting on
596 // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
598 let chanmon_cfgs = create_chanmon_cfgs(2);
599 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
600 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
601 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
602 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
603 let logger = Arc::new(test_utils::TestLogger::new());
605 let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
607 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
608 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
609 nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
610 check_added_monitors!(nodes[0], 1);
613 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
614 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
// Run the dance but keep nodes[0]'s final RAA so we can deliver it manually below.
615 let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
// Fail the monitor update triggered by that RAA; nodes[1] must go fully silent.
617 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
618 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
619 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
620 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
621 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
622 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
623 check_added_monitors!(nodes[1], 1);
// Completing the update produces no outbound messages — only the HTLC forward.
625 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
626 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
627 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
628 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
629 check_added_monitors!(nodes[1], 0);
630 expect_pending_htlcs_forwardable!(nodes[1]);
632 let events = nodes[1].node.get_and_clear_pending_events();
633 assert_eq!(events.len(), 1);
635 Event::PaymentReceived { payment_hash, .. } => {
636 assert_eq!(payment_hash, our_payment_hash);
638 _ => panic!("Unexpected event"),
641 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
645 fn test_monitor_update_raa_while_paused() {
646 // Tests handling of an RAA while monitor updating has already been marked failed.
647 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
// Setup: two nodes with one announced channel; rebalance so both sides can send.
648 let chanmon_cfgs = create_chanmon_cfgs(2);
649 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
650 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
651 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
652 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
653 let logger = Arc::new(test_utils::TestLogger::new());
655 send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
656 let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
// nodes[0] sends an HTLC to nodes[1]; the resulting update_add+commitment_signed is
// captured in send_event_1 but not yet delivered.
658 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
659 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
660 nodes[0].node.send_payment(&route, our_payment_hash_1, &None).unwrap();
661 check_added_monitors!(nodes[0], 1);
663 let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
// Simultaneously, nodes[1] sends an HTLC the other way, captured in send_event_2.
665 let (payment_preimage_2, our_payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
667 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
668 let route = get_route(&nodes[1].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
669 nodes[1].node.send_payment(&route, our_payment_hash_2, &None).unwrap();
670 check_added_monitors!(nodes[1], 1);
672 let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
// Deliver nodes[0]'s add+CS to nodes[1], which responds with an RAA (bs_raa).
674 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
675 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
676 check_added_monitors!(nodes[1], 1);
677 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
// Force the next monitor update on nodes[0] to fail, then deliver nodes[1]'s add+CS.
// nodes[0] must produce no messages while monitor updating is paused.
679 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
680 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
681 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
682 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
683 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
684 check_added_monitors!(nodes[0], 1);
// The interesting case: an RAA arrives while the monitor update is already failed.
// nodes[0] must hold its responses rather than acting on the RAA.
686 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
687 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
688 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
689 check_added_monitors!(nodes[0], 1);
// Un-fail the monitor and signal completion; nodes[0] should now release its queued
// RAA + commitment_signed in one go.
691 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
692 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
693 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
694 check_added_monitors!(nodes[0], 0);
696 let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
697 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
698 check_added_monitors!(nodes[1], 1);
699 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
701 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
702 check_added_monitors!(nodes[1], 1);
703 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
// Finish the commitment dance in both directions.
705 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
706 check_added_monitors!(nodes[0], 1);
707 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
709 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
710 check_added_monitors!(nodes[0], 1);
711 expect_pending_htlcs_forwardable!(nodes[0]);
712 expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);
714 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
715 check_added_monitors!(nodes[1], 1);
716 expect_pending_htlcs_forwardable!(nodes[1]);
717 expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
// Both HTLCs landed; claim them to leave the channel in a clean state.
719 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
720 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
723 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
724 // Tests handling of a monitor update failure when processing an incoming RAA
// Parameterized body shared by test_monitor_update_fail_raa: when
// test_ignore_second_cs is set, an extra payment is routed backwards from
// nodes[2] while nodes[1]'s monitor is failed, exercising the held-CS path.
// NOTE(review): extraction appears to have dropped some lines in this block
// (match/else closers and at least one arm value near the SendRevokeAndACK
// match) — verify against upstream before relying on the exact text here.
725 let chanmon_cfgs = create_chanmon_cfgs(3);
726 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
727 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
728 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
729 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
730 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
731 let logger = Arc::new(test_utils::TestLogger::new());
733 // Rebalance a bit so that we can send backwards from 2 to 1.
734 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
736 // Route a first payment that we'll fail backwards
737 let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
739 // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
740 assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
741 expect_pending_htlcs_forwardable!(nodes[2]);
742 check_added_monitors!(nodes[2], 1);
// nodes[2]'s fail-back must consist of exactly one update_fail_htlc.
744 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
745 assert!(updates.update_add_htlcs.is_empty());
746 assert!(updates.update_fulfill_htlcs.is_empty());
747 assert_eq!(updates.update_fail_htlcs.len(), 1);
748 assert!(updates.update_fail_malformed_htlcs.is_empty());
749 assert!(updates.update_fee.is_none());
750 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
// The dance is stopped short (last two flags) so we can deliver the final RAA
// ourselves later, after failing nodes[1]'s monitor.
752 let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
753 check_added_monitors!(nodes[0], 0);
755 // While the second channel is AwaitingRAA, forward a second payment to get it into the
757 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
759 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
760 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
761 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
762 check_added_monitors!(nodes[0], 1);
765 let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
766 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
767 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
// The second payment sits in nodes[1]'s holding cell (channel 2 is AwaitingRAA),
// so no forward happens and no monitor update is added yet.
769 expect_pending_htlcs_forwardable!(nodes[1]);
770 check_added_monitors!(nodes[1], 0);
771 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
773 // Now fail monitor updating.
774 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
775 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
776 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
777 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
778 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
779 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
780 check_added_monitors!(nodes[1], 1);
782 // Attempt to forward a third payment but fail due to the second channel being unavailable
784 let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
786 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
787 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
788 nodes[0].node.send_payment(&route, payment_hash_3, &None).unwrap();
789 check_added_monitors!(nodes[0], 1);
792 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
793 send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
794 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
795 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
796 check_added_monitors!(nodes[1], 0);
// Because channel 2 is still monitor-failed, nodes[1] immediately fails the third
// HTLC back to nodes[0] instead of forwarding it.
798 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
799 assert_eq!(events_2.len(), 1);
800 match events_2.remove(0) {
801 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
802 assert_eq!(node_id, nodes[0].node.get_our_node_id());
803 assert!(updates.update_fulfill_htlcs.is_empty());
804 assert_eq!(updates.update_fail_htlcs.len(), 1);
805 assert!(updates.update_fail_malformed_htlcs.is_empty());
806 assert!(updates.update_add_htlcs.is_empty());
807 assert!(updates.update_fee.is_none());
809 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
810 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
// The failure carries a channel_update marking channel 2 temporarily disabled.
812 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
813 assert_eq!(msg_events.len(), 1);
814 match msg_events[0] {
815 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
816 assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
817 assert_eq!(msg.contents.flags & 2, 2); // temp disabled
819 _ => panic!("Unexpected event"),
// nodes[0] sees payment 3 fail, but not rejected by the final destination.
822 let events = nodes[0].node.get_and_clear_pending_events();
823 assert_eq!(events.len(), 1);
824 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
825 assert_eq!(payment_hash, payment_hash_3);
826 assert!(!rejected_by_dest);
827 } else { panic!("Unexpected event!"); }
829 _ => panic!("Unexpected event type!"),
832 let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
833 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
834 let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
835 let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
836 let route = get_route(&nodes[2].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
837 nodes[2].node.send_payment(&route, payment_hash_4, &None).unwrap();
838 check_added_monitors!(nodes[2], 1);
840 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
841 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
842 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
843 check_added_monitors!(nodes[1], 1);
// nodes[1] must not generate the RAA while its monitor update is still failed.
844 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
845 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
846 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
847 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
848 (Some(payment_preimage_4), Some(payment_hash_4))
849 } else { (None, None) };
851 // Restore monitor updating, ensuring we immediately get a fail-back update and a
852 // update_add update.
853 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
854 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
855 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
856 check_added_monitors!(nodes[1], 0);
857 expect_pending_htlcs_forwardable!(nodes[1]);
858 check_added_monitors!(nodes[1], 1);
// With the extra backwards payment pending there are 3 events, otherwise 2.
860 let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
861 if test_ignore_second_cs {
862 assert_eq!(events_3.len(), 3);
864 assert_eq!(events_3.len(), 2);
867 // Note that the ordering of the events for different nodes is non-prescriptive, though the
868 // ordering of the two events that both go to nodes[2] have to stay in the same order.
869 let messages_a = match events_3.pop().unwrap() {
870 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
871 assert_eq!(node_id, nodes[0].node.get_our_node_id());
872 assert!(updates.update_fulfill_htlcs.is_empty());
873 assert_eq!(updates.update_fail_htlcs.len(), 1);
874 assert!(updates.update_fail_malformed_htlcs.is_empty());
875 assert!(updates.update_add_htlcs.is_empty());
876 assert!(updates.update_fee.is_none());
877 (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
879 _ => panic!("Unexpected event type!"),
// In the ignore-second-CS mode, the third queued event is nodes[1]'s held RAA
// for nodes[2]. NOTE(review): the arm's value expression is missing from this
// extraction (upstream returns the RAA message here) — confirm against upstream.
881 let raa = if test_ignore_second_cs {
882 match events_3.remove(1) {
883 MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
884 assert_eq!(node_id, nodes[2].node.get_our_node_id());
887 _ => panic!("Unexpected event"),
890 let send_event_b = SendEvent::from_event(events_3.remove(0));
891 assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
893 // Now deliver the new messages...
// nodes[0] receives the fail-back for payment 1, rejected by the destination.
895 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
896 commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
897 let events_4 = nodes[0].node.get_and_clear_pending_events();
898 assert_eq!(events_4.len(), 1);
899 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
900 assert_eq!(payment_hash, payment_hash_1);
901 assert!(rejected_by_dest);
902 } else { panic!("Unexpected event!"); }
// Deliver the forwarded second payment to nodes[2], then complete the commitment
// dances manually (ignore-second-CS mode) or via the macro (plain mode).
904 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
905 if test_ignore_second_cs {
906 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
907 check_added_monitors!(nodes[2], 1);
908 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
909 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
910 check_added_monitors!(nodes[2], 1);
911 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
912 assert!(bs_cs.update_add_htlcs.is_empty());
913 assert!(bs_cs.update_fail_htlcs.is_empty());
914 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
915 assert!(bs_cs.update_fulfill_htlcs.is_empty());
916 assert!(bs_cs.update_fee.is_none());
918 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
919 check_added_monitors!(nodes[1], 1);
920 let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
921 assert!(as_cs.update_add_htlcs.is_empty());
922 assert!(as_cs.update_fail_htlcs.is_empty());
923 assert!(as_cs.update_fail_malformed_htlcs.is_empty());
924 assert!(as_cs.update_fulfill_htlcs.is_empty());
925 assert!(as_cs.update_fee.is_none());
927 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
928 check_added_monitors!(nodes[1], 1);
929 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
931 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
932 check_added_monitors!(nodes[2], 1);
933 let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
935 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
936 check_added_monitors!(nodes[2], 1);
937 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
939 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
940 check_added_monitors!(nodes[1], 1);
941 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
943 commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
// Payment 2 finally reaches nodes[2].
946 expect_pending_htlcs_forwardable!(nodes[2]);
948 let events_6 = nodes[2].node.get_and_clear_pending_events();
949 assert_eq!(events_6.len(), 1);
951 Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
952 _ => panic!("Unexpected event"),
// In ignore-second-CS mode, payment 4 also completes backwards to nodes[0].
955 if test_ignore_second_cs {
956 expect_pending_htlcs_forwardable!(nodes[1]);
957 check_added_monitors!(nodes[1], 1);
959 send_event = SendEvent::from_node(&nodes[1]);
960 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
961 assert_eq!(send_event.msgs.len(), 1);
962 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
963 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
965 expect_pending_htlcs_forwardable!(nodes[0]);
967 let events_9 = nodes[0].node.get_and_clear_pending_events();
968 assert_eq!(events_9.len(), 1);
970 Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
971 _ => panic!("Unexpected event"),
973 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
// Clean up the outstanding second payment.
976 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
980 fn test_monitor_update_fail_raa() {
981 do_test_monitor_update_fail_raa(false);
982 do_test_monitor_update_fail_raa(true);
986 fn test_monitor_update_fail_reestablish() {
987 // Simple test for message retransmission after monitor update failure on
988 // channel_reestablish generating a monitor update (which comes from freeing holding cell
// Setup: three nodes, channels 0<->1 and 1<->2, with a payment routed 0->1->2
// whose fulfill will be stuck in nodes[1]'s holding cell while 0 and 1 are
// disconnected.
990 let chanmon_cfgs = create_chanmon_cfgs(3);
991 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
992 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
993 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
994 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
995 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
997 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
999 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1000 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
// nodes[2] claims; the fulfill propagates to nodes[1] but can go no further
// (nodes[0] is disconnected), so it waits in the holding cell.
1002 assert!(nodes[2].node.claim_funds(our_payment_preimage, &None, 1_000_000));
1003 check_added_monitors!(nodes[2], 1);
1004 let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1005 assert!(updates.update_add_htlcs.is_empty());
1006 assert!(updates.update_fail_htlcs.is_empty());
1007 assert!(updates.update_fail_malformed_htlcs.is_empty());
1008 assert!(updates.update_fee.is_none());
1009 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1010 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1011 check_added_monitors!(nodes[1], 1);
1012 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1013 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
// Reconnect 0 and 1 with nodes[1]'s monitor set to fail: the reestablish frees
// the holding cell, whose monitor update then fails.
1015 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1016 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1017 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1019 let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1020 let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1022 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1024 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1025 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1026 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1027 check_added_monitors!(nodes[1], 1);
// Disconnect/reconnect again; the retransmitted reestablish messages must be
// identical, and this time (monitor still failed) no new update is attempted.
1029 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1030 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1032 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1033 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1035 assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
1036 assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
1038 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1040 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1041 check_added_monitors!(nodes[1], 0);
1042 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Un-fail the monitor; nodes[1] should now emit the queued fulfill to nodes[0].
1044 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1045 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1046 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1047 check_added_monitors!(nodes[1], 0);
1049 updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1050 assert!(updates.update_add_htlcs.is_empty());
1051 assert!(updates.update_fail_htlcs.is_empty());
1052 assert!(updates.update_fail_malformed_htlcs.is_empty());
1053 assert!(updates.update_fee.is_none());
1054 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1055 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1056 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
// nodes[0] finally learns the payment succeeded.
// NOTE(review): the `match events[0] {` opener appears to have been dropped by
// extraction between the next two lines — verify against upstream.
1058 let events = nodes[0].node.get_and_clear_pending_events();
1059 assert_eq!(events.len(), 1);
1061 Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
1062 _ => panic!("Unexpected event"),
1067 fn raa_no_response_awaiting_raa_state() {
1068 // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1069 // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1070 // in question (assuming it intends to respond with a CS after monitor updating is restored).
1071 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1072 let chanmon_cfgs = create_chanmon_cfgs(2);
1073 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1074 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1075 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1076 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
1077 let logger = Arc::new(test_utils::TestLogger::new());
1079 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1080 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1081 let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
1083 // Queue up two payments - one will be delivered right away, one immediately goes into the
1084 // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1085 // immediately after a CS. By setting failing the monitor update failure from the CS (which
1086 // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1087 // generation during RAA while in monitor-update-failed state.
1089 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1090 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1091 nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
1092 check_added_monitors!(nodes[0], 1);
// Second send is queued in the holding cell: no monitor update yet.
1093 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1094 check_added_monitors!(nodes[0], 0);
// Deliver payment 1's add+CS to nodes[1]; collect its RAA+CS response.
1097 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1098 assert_eq!(events.len(), 1);
1099 let payment_event = SendEvent::from_event(events.pop().unwrap());
1100 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1101 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1102 check_added_monitors!(nodes[1], 1);
// nodes[0]'s RAA handling frees the held second payment (a new SendEvent).
1104 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1105 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1106 check_added_monitors!(nodes[0], 1);
1107 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1108 assert_eq!(events.len(), 1);
1109 let payment_event = SendEvent::from_event(events.pop().unwrap());
1111 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1112 check_added_monitors!(nodes[0], 1);
1113 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1115 // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1116 // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1117 // then restore channel monitor updates.
1118 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1119 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1120 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1121 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1122 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1123 check_added_monitors!(nodes[1], 1);
// RAA arrives while monitor-failed: nodes[1] must hold its responses.
1125 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1126 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1127 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
1128 check_added_monitors!(nodes[1], 1);
// Restore monitor updating; nodes[1] releases its RAA+CS and payment 1 lands.
1130 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1131 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1132 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1133 // nodes[1] should be AwaitingRAA here!
1134 check_added_monitors!(nodes[1], 0);
1135 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1136 expect_pending_htlcs_forwardable!(nodes[1]);
1137 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1139 // We send a third payment here, which is somewhat of a redundant test, but the
1140 // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1141 // commitment transaction states) whereas here we can explicitly check for it.
1143 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1144 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1145 nodes[0].node.send_payment(&route, payment_hash_3, &None).unwrap();
// Payment 3 also waits in the holding cell (nodes[0] is AwaitingRAA again).
1146 check_added_monitors!(nodes[0], 0);
1147 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1149 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1150 check_added_monitors!(nodes[0], 1);
1151 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1152 assert_eq!(events.len(), 1);
1153 let payment_event = SendEvent::from_event(events.pop().unwrap());
1155 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1156 check_added_monitors!(nodes[0], 1);
1157 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
// Deliver payment 3's add+CS, then the RAA which triggers the CS for it.
1159 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1160 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1161 check_added_monitors!(nodes[1], 1);
1162 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1164 // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1165 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1166 check_added_monitors!(nodes[1], 1);
1167 expect_pending_htlcs_forwardable!(nodes[1]);
1168 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1169 let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1171 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1172 check_added_monitors!(nodes[0], 1);
1174 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1175 check_added_monitors!(nodes[0], 1);
1176 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1178 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1179 check_added_monitors!(nodes[1], 1);
1180 expect_pending_htlcs_forwardable!(nodes[1]);
1181 expect_payment_received!(nodes[1], payment_hash_3, 1000000);
// All three payments received; claim them to finish in a clean state.
1183 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1184 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1185 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
1189 fn claim_while_disconnected_monitor_update_fail() {
1190 // Test for claiming a payment while disconnected and then having the resulting
1191 // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1192 // contrived case for nodes with network instability.
1193 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1194 // code introduced a regression in this test (specifically, this caught a removal of the
1195 // channel_reestablish handling ensuring the order was sensical given the messages used).
1196 let chanmon_cfgs = create_chanmon_cfgs(2);
1197 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1198 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1199 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1200 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
1201 let logger = Arc::new(test_utils::TestLogger::new());
1203 // Forward a payment for B to claim
1204 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
1206 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1207 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1209 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1210 check_added_monitors!(nodes[1], 1);
1212 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1213 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1215 let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1216 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1218 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1219 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1221 // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
1223 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1225 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1226 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1227 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1228 check_added_monitors!(nodes[1], 1);
1229 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1231 // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1232 // the monitor still failed
1233 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1235 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1236 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1237 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1238 check_added_monitors!(nodes[0], 1);
1241 let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1242 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1243 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1244 check_added_monitors!(nodes[1], 1);
1245 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1246 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
1247 // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
1248 // until we've channel_monitor_update'd and updated for the new commitment transaction.
1250 // Now un-fail the monitor, which will result in B sending its original commitment update,
1251 // receiving the commitment update from A, and the resulting commitment dances.
1252 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1253 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1254 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1255 check_added_monitors!(nodes[1], 0);
1257 let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1258 assert_eq!(bs_msgs.len(), 2);
1261 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1262 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1263 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1264 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1265 check_added_monitors!(nodes[0], 1);
1267 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1268 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1269 check_added_monitors!(nodes[1], 1);
1271 _ => panic!("Unexpected event"),
1275 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1276 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1277 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1278 check_added_monitors!(nodes[0], 1);
1280 _ => panic!("Unexpected event"),
1283 let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1285 let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1286 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1287 check_added_monitors!(nodes[0], 1);
1288 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1290 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1291 check_added_monitors!(nodes[1], 1);
1292 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1293 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1294 check_added_monitors!(nodes[1], 1);
1296 expect_pending_htlcs_forwardable!(nodes[1]);
1297 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1299 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1300 check_added_monitors!(nodes[0], 1);
1302 let events = nodes[0].node.get_and_clear_pending_events();
1303 assert_eq!(events.len(), 1);
1305 Event::PaymentSent { ref payment_preimage } => {
1306 assert_eq!(*payment_preimage, payment_preimage_1);
1308 _ => panic!("Unexpected event"),
1311 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1315 fn monitor_failed_no_reestablish_response() {
1316 // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1317 // response to a commitment_signed.
1318 // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1319 // debug_assert!() failure in channel_reestablish handling.
1320 let chanmon_cfgs = create_chanmon_cfgs(2);
1321 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1322 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1323 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1324 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
1325 let logger = Arc::new(test_utils::TestLogger::new());
1327 // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1329 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1331 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1332 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1333 nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
1334 check_added_monitors!(nodes[0], 1);
// Arm the failure BEFORE B processes the commitment_signed: B accepts the HTLC but the
// monitor update fails, so B generates no RAA/CS response at all (only a log + monitor add).
1337 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1338 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1339 assert_eq!(events.len(), 1);
1340 let payment_event = SendEvent::from_event(events.pop().unwrap());
1341 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1342 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1343 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1344 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1345 check_added_monitors!(nodes[1], 1);
1347 // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1348 // is still failing to update monitors.
1349 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1350 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1352 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1353 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1355 let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1356 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1358 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1359 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
// Un-fail the monitor and tell the ChannelManager the update completed; B now (finally)
// produces its RAA + commitment_signed response pair for the original payment.
1361 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1362 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1363 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1364 check_added_monitors!(nodes[1], 0);
1365 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1367 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1368 check_added_monitors!(nodes[0], 1);
1369 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1370 check_added_monitors!(nodes[0], 1);
// Complete the commitment dance, after which B can process and receive the payment.
1372 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1373 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1374 check_added_monitors!(nodes[1], 1);
1376 expect_pending_htlcs_forwardable!(nodes[1]);
1377 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1379 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1383 fn first_message_on_recv_ordering() {
1384 // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1385 // messages, we're willing to flip the order of response messages if necessary in response to
1386 // a commitment_signed which needs to send an RAA first.
1387 // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1388 // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1389 // response. To do this, we start routing two payments, with the final RAA for the first being
1390 // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1391 // have no pending response but will want to send a RAA/CS (with the updates for the second
1392 // payment applied).
1393 // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1394 let chanmon_cfgs = create_chanmon_cfgs(2);
1395 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1396 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1397 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1398 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
1399 let logger = Arc::new(test_utils::TestLogger::new());
1401 // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1402 // can deliver it and fail the monitor update.
1403 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
1405 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1406 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1407 nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
1408 check_added_monitors!(nodes[0], 1);
1411 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1412 assert_eq!(events.len(), 1);
1413 let payment_event = SendEvent::from_event(events.pop().unwrap());
1414 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1415 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1416 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1417 check_added_monitors!(nodes[1], 1);
1418 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1420 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1421 check_added_monitors!(nodes[0], 1);
1422 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1423 check_added_monitors!(nodes[0], 1);
// A's final RAA for the first payment is captured here and deliberately NOT delivered yet;
// B is left in AwaitingRAA for the setup described in the header comment.
1425 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1427 // Route the second payment, generating an update_add_htlc/commitment_signed
1428 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1430 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1431 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1432 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1433 check_added_monitors!(nodes[0], 1);
1435 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1436 assert_eq!(events.len(), 1);
1437 let payment_event = SendEvent::from_event(events.pop().unwrap());
1438 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
// Arm the monitor failure on B before delivering the held RAA.
1440 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1442 // Deliver the final RAA for the first payment, which does not require a response. RAAs
1443 // generally require a commitment_signed, so the fact that we're expecting an opposite response
1444 // to the next message also tests resetting the delivery order.
1445 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1446 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1447 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1448 check_added_monitors!(nodes[1], 1);
1450 // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1451 // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1452 // appropriate HTLC acceptance).
1453 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1454 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1455 check_added_monitors!(nodes[1], 1);
1456 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1457 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
// Un-fail the monitor; B should now emit the RAA-first response pair and can process the
// first payment (which became receivable when the held RAA was applied above).
1459 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1460 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1461 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1462 check_added_monitors!(nodes[1], 0);
1464 expect_pending_htlcs_forwardable!(nodes[1]);
1465 expect_payment_received!(nodes[1], payment_hash_1, 1000000);
1467 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1468 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1469 check_added_monitors!(nodes[0], 1);
1470 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1471 check_added_monitors!(nodes[0], 1);
// Finish the commitment dance for the second payment and verify both payments complete.
1473 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1474 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1475 check_added_monitors!(nodes[1], 1);
1477 expect_pending_htlcs_forwardable!(nodes[1]);
1478 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1480 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
1481 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1485 fn test_monitor_update_fail_claim() {
1486 // Basic test for monitor update failures when processing claim_funds calls.
1487 // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1488 // update to claim the payment. We then send a payment C->B->A, making the forward of this
1489 // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
1490 // updating and claim the payment on B.
1491 let chanmon_cfgs = create_chanmon_cfgs(3);
1492 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1493 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1494 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1495 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1496 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1497 let logger = Arc::new(test_utils::TestLogger::new());
1499 // Rebalance a bit so that we can send backwards from 3 to 2.
1500 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
1502 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
// claim_funds with a failed monitor update pauses the 0<->1 channel; the claim itself
// still returns true (it is treated as success and retried on restore).
1504 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1505 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1506 check_added_monitors!(nodes[1], 1);
1508 let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1510 let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
1511 let route = get_route(&nodes[2].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1512 nodes[2].node.send_payment(&route, payment_hash_2, &None).unwrap();
1513 check_added_monitors!(nodes[2], 1);
1516 // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1517 // paused, so forward shouldn't succeed until we call channel_monitor_updated().
1518 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1520 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1521 assert_eq!(events.len(), 1);
1522 let payment_event = SendEvent::from_event(events.pop().unwrap());
1523 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1524 let events = nodes[1].node.get_and_clear_pending_msg_events();
1525 assert_eq!(events.len(), 0);
1526 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
1527 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
// B cannot forward over the paused 0<->1 channel, so it fails the C->B->A HTLC back to C.
1529 let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1530 nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
1531 commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
// C should see a network update marking the 0<->1 channel temporarily disabled ...
1533 let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
1534 assert_eq!(msg_events.len(), 1);
1535 match msg_events[0] {
1536 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
1537 assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
1538 assert_eq!(msg.contents.flags & 2, 2); // temp disabled
1540 _ => panic!("Unexpected event"),
// ... and a PaymentFailed event that does not blame the destination.
1543 let events = nodes[2].node.get_and_clear_pending_events();
1544 assert_eq!(events.len(), 1);
1545 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1546 assert_eq!(payment_hash, payment_hash_2);
1547 assert!(!rejected_by_dest);
1548 } else { panic!("Unexpected event!"); }
1550 // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1551 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1552 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1553 check_added_monitors!(nodes[1], 0);
// With the channel unpaused, B's held fulfill goes out and A sees PaymentSent.
1555 let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1556 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1557 commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1559 let events = nodes[0].node.get_and_clear_pending_events();
1560 assert_eq!(events.len(), 1);
1561 if let Event::PaymentSent { payment_preimage, .. } = events[0] {
1562 assert_eq!(payment_preimage, payment_preimage_1);
1563 } else { panic!("Unexpected event!"); }
1567 fn test_monitor_update_on_pending_forwards() {
1568 // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1569 // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1570 // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1571 // from C to A will be pending a forward to A.
1572 let chanmon_cfgs = create_chanmon_cfgs(3);
1573 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1574 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1575 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1576 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1577 create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1578 let logger = Arc::new(test_utils::TestLogger::new());
1580 // Rebalance a bit so that we can send backwards from 3 to 1.
1581 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
// First pending item on B: a back-fail of an A->C payment that C rejected.
1583 let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1584 assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, &None));
1585 expect_pending_htlcs_forwardable!(nodes[2]);
1586 check_added_monitors!(nodes[2], 1);
1588 let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1589 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1590 commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1591 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Second pending item on B: a C->A payment awaiting forward to A.
1593 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1595 let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
1596 let route = get_route(&nodes[2].node.get_our_node_id(), net_graph_msg_handler, &nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1597 nodes[2].node.send_payment(&route, payment_hash_2, &None).unwrap();
1598 check_added_monitors!(nodes[2], 1);
1601 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1602 assert_eq!(events.len(), 1);
1603 let payment_event = SendEvent::from_event(events.pop().unwrap());
1604 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1605 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
// Fail B's monitor update while it processes both pending forwards; no messages may go out.
1607 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1608 expect_pending_htlcs_forwardable!(nodes[1]);
1609 check_added_monitors!(nodes[1], 1);
1610 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1611 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
// Restore the monitor; B should now send A both the fail (payment 1) and add (payment 2).
1613 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1614 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1615 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1616 check_added_monitors!(nodes[1], 0);
1618 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1619 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1620 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1621 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
// A sees the failure of payment 1 (rejected by the destination, C) plus a forwardable event
// for payment 2, which it then receives.
1623 let events = nodes[0].node.get_and_clear_pending_events();
1624 assert_eq!(events.len(), 2);
1625 if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
1626 assert_eq!(payment_hash, payment_hash_1);
1627 assert!(rejected_by_dest);
1628 } else { panic!("Unexpected event!"); }
1630 Event::PendingHTLCsForwardable { .. } => { },
1631 _ => panic!("Unexpected event"),
1633 nodes[0].node.process_pending_htlc_forwards();
1634 expect_payment_received!(nodes[0], payment_hash_2, 1000000);
1636 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
1640 fn monitor_update_claim_fail_no_response() {
1641 // Test for claim_funds resulting in both a monitor update failure and no message response (due
1642 // to channel being AwaitingRAA).
1643 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1645 let chanmon_cfgs = create_chanmon_cfgs(2);
1646 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1647 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1648 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1649 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
1650 let logger = Arc::new(test_utils::TestLogger::new());
1652 // Forward a payment for B to claim
1653 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
1655 // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1656 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
1658 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1659 let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1660 nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
1661 check_added_monitors!(nodes[0], 1);
1664 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1665 assert_eq!(events.len(), 1);
1666 let payment_event = SendEvent::from_event(events.pop().unwrap());
1667 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
// The extra trailing bools on commitment_signed_dance! return A's final RAA undelivered,
// leaving B in AwaitingRAA.
1668 let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
// Claim while the monitor update fails: the claim is treated as success (logged below) but
// B emits no messages since it is both monitor-paused and AwaitingRAA.
1670 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1671 assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
1672 check_added_monitors!(nodes[1], 1);
1673 let events = nodes[1].node.get_and_clear_pending_msg_events();
1674 assert_eq!(events.len(), 0);
1675 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
// Restoring the monitor still produces no messages — B remains AwaitingRAA.
1677 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1678 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1679 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1680 check_added_monitors!(nodes[1], 0);
1681 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Only once the held RAA is delivered does B receive payment 2 and release the fulfill.
1683 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1684 check_added_monitors!(nodes[1], 1);
1685 expect_pending_htlcs_forwardable!(nodes[1]);
1686 expect_payment_received!(nodes[1], payment_hash_2, 1000000);
1688 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1689 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1690 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1692 let events = nodes[0].node.get_and_clear_pending_events();
1693 assert_eq!(events.len(), 1);
1695 Event::PaymentSent { ref payment_preimage } => {
1696 assert_eq!(*payment_preimage, payment_preimage_1);
1698 _ => panic!("Unexpected event"),
1701 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
1704 // confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
1705 // restore_b_before_conf has no meaning if !confirm_a_first
1706 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool) {
1707 // Test that if the monitor update generated by funding_transaction_generated fails we continue
1708 // the channel setup happily after the update is restored.
1709 let chanmon_cfgs = create_chanmon_cfgs(2);
1710 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1711 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1712 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Open-channel handshake up through accept_channel; funding not yet created.
1714 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1715 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1716 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1718 let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
1720 nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
1721 check_added_monitors!(nodes[0], 0);
// B's monitor update fails while handling funding_created; B stays paused until restored.
1723 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1724 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1725 let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1726 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1727 check_added_monitors!(nodes[1], 1);
// A's monitor update also fails on funding_signed; no FundingBroadcastSafe until restored.
1729 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
1730 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1731 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1732 nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
1733 check_added_monitors!(nodes[0], 1);
1734 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1735 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1736 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1737 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1738 check_added_monitors!(nodes[0], 0);
1740 let events = nodes[0].node.get_and_clear_pending_events();
1741 assert_eq!(events.len(), 1);
1743 Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
1744 assert_eq!(user_channel_id, 43);
1745 assert_eq!(*funding_txo, funding_output);
1747 _ => panic!("Unexpected event"),
// Optionally confirm on A first; B is still monitor-paused so it must not respond to
// A's funding_locked (or generate its own) yet.
1750 if confirm_a_first {
1751 confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
1752 nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
1754 assert!(!restore_b_before_conf);
1755 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1756 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1759 // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
1760 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1761 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1762 reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1763 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1764 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1766 if !restore_b_before_conf {
1767 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1768 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1769 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
// Restore B's monitor; channel setup then completes in whichever order the bools selected.
1772 *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
1773 let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1774 nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
1775 check_added_monitors!(nodes[1], 0);
1777 let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1778 nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
1780 confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
1781 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1782 (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
1784 if restore_b_before_conf {
1785 confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
1787 let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1788 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
// Propagate the announcement/updates to all nodes' routers, then verify the channel is
// fully usable end-to-end before closing it cooperatively.
1790 for node in nodes.iter() {
1791 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
1792 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
1793 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
1796 send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
1797 close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
// Driver running do_during_funding_monitor_fail over its supported parameter
// combinations.
// NOTE(review): the fourth combination (false, true) is deliberately not run —
// presumably invalid given the `assert!(!restore_b_before_conf)` inside the
// helper; confirm against do_during_funding_monitor_fail's parameter order.
// NOTE(review): the `#[test]` attribute and closing brace were dropped from
// this excerpt by the extraction.
1801 fn during_funding_monitor_fail() {
1802 do_during_funding_monitor_fail(true, true);
1803 do_during_funding_monitor_fail(true, false);
1804 do_during_funding_monitor_fail(false, false);
// NOTE(review): the `#[test]` attribute, the continuation of the doc comment
// below (original line 1810), and the closing brace are not visible in this
// excerpt — the function runs past the last line shown here.
1808 fn test_path_paused_mpp() {
1809 // Simple test of sending a multi-part payment where one path is currently blocked awaiting
// Build a 4-node diamond: 0 -> 1 -> 3 and 0 -> 2 -> 3.
1811 let chanmon_cfgs = create_chanmon_cfgs(4);
1812 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1813 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1814 let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1816 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
1817 let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
1818 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
1819 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
1820 let logger = Arc::new(test_utils::TestLogger::new());
// Fixed dummy payment secret so both HTLCs can be correlated into one MPP
// payment at nodes[3].
1822 let (payment_preimage, payment_hash) = get_payment_preimage_hash!(&nodes[0]);
1823 let payment_secret = PaymentSecret([0xdb; 32]);
1824 let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler, &nodes[3].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, logger.clone()).unwrap();
1826 // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
1827 let path = route.paths[0].clone();
1828 route.paths.push(path);
1829 route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
1830 route.paths[0][0].short_channel_id = chan_1_id;
1831 route.paths[0][1].short_channel_id = chan_3_id;
1832 route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
1833 route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
1834 route.paths[1][1].short_channel_id = chan_4_id;
1836 // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
1837 // (for the path 0 -> 2 -> 3) fails.
1838 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1839 *nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
1841 // Now check that we get the right return value, indicating that the first path succeeded but
1842 // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
1843 // some paths succeeded, preventing retry.
1844 if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) {
1845 assert_eq!(results.len(), 2);
1846 if let Ok(()) = results[0] {} else { panic!(); }
1847 if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
1848 } else { panic!(); }
// Both sends attempted a monitor update (even the failed one), hence 2.
1849 check_added_monitors!(nodes[0], 2);
1850 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
1852 // Pass the first HTLC of the payment along to nodes[3].
1853 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1854 assert_eq!(events.len(), 1);
1855 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false);
1857 // And check that, after we successfully update the monitor for chan_2 we can pass the second
1858 // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
1859 let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
1860 nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
1861 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1862 assert_eq!(events.len(), 1);
1863 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true);
1865 claim_payment_along_route_with_secret(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage, Some(payment_secret), 200_000);