// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.

use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::TestBroadcaster;

use crate::ln::functional_test_utils::*;

use crate::util::test_utils;

use crate::io;
use bitcoin::hashes::Hash;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};

#[test]
fn test_simple_monitor_permanent_update_fail() {
	// Test that we handle a simple permanent monitor update failure
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
		), true, APIError::ChannelUnavailable {..}, {});
	check_added_monitors!(nodes[0], 2);
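
	// The PermanentFailure on the initial monitor update forces the channel closed, which is
	// why two monitor updates are counted above: the rejected payment update plus the one
	// generated by the force-close.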

	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 2);
	match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	};
	match events_1[1] {
		MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
		_ => panic!("Unexpected event"),
	};

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
		[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_monitor_and_persister_update_fail() {
	// Test that if both updating the `ChannelMonitor` and persisting the updated
	// `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
	// is the one that gets returned.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channel
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };

	// Rebalance the network to generate HTLCs in both directions
	send_payment(&nodes[0], &[&nodes[1]], 10_000_000);

	// Route an HTLC from node 0 to node 1 (but don't settle)
	let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);

	// Make a copy of the ChainMonitor so we can capture the error it returns on a
	// bogus update. Note that if instead we updated nodes[0]'s ChainMonitor
	// directly, the node would fail to be `Drop`'d at the end because its
	// ChannelManager and ChainMonitor would be out of sync.
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
	let persister = test_utils::TestPersister::new();
	let tx_broadcaster = TestBroadcaster {
		txn_broadcasted: Mutex::new(Vec::new()),
		// Because we will connect a block at height 200 below, we need the TestBroadcaster to know
		// that we are at height 200 so that it doesn't think we're violating the time lock
		// requirements of transactions broadcasted at that point.
		blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
	};
	let chain_mon = {
		let new_monitor = {
			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
			let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
			assert!(new_monitor == *monitor);
			new_monitor
		};
		let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
		chain_mon
	};
	chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
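
	// Connecting a block at height 200 puts the copied monitor well past the routed HTLC's
	// expiry, so it goes on-chain (hence the height-200 TestBroadcaster setup above). That
	// on-chain activity is what makes the later off-chain update "bogus" from its perspective.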

	// Set the persister's return value to be an InProgress.
	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	// Try to update ChannelMonitor
	nodes[1].node.claim_funds(preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
	check_added_monitors!(nodes[1], 1);

	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
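
	// Handling the commitment_signed directly against the Channel below (rather than through
	// the ChannelManager) hands us the resulting ChannelMonitorUpdate, which we can then feed
	// to both ChainMonitors and compare what they return.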
	let mut node_0_per_peer_lock;
	let mut node_0_peer_state_lock;
	if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
			// Check that even though the persister is returning an InProgress,
			// because the update is bogus, ultimately the error that's returned
			// should be a PermanentFailure.
			if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
			logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
		} else { assert!(false); }
	} else { assert!(false); }

	check_added_monitors!(nodes[0], 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
}

fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
	// Test that we can recover from a simple temporary monitor update failure, optionally with
	// a disconnect in between
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
		), false, APIError::MonitorUpdateInProgress, {});
	check_added_monitors!(nodes[0], 1);
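
	// With the monitor update marked in-progress the payment is paused, not failed: nothing
	// should be sent to nodes[1] and no events generated until the update completes.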

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.send_channel_ready = (true, true);
		reconnect_nodes(reconnect_args);
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
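	// The persister will now return Completed again; force_channel_monitor_updated below
	// signals completion of the pending update, which releases the paused payment.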
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let payment_event = SendEvent::from_event(events_2.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_3 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_1, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_1, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);

	// Now set it to failed again...
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
		), false, APIError::MonitorUpdateInProgress, {});
	check_added_monitors!(nodes[0], 1);

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
	}

	// ...and make sure we can force-close a frozen channel
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_broadcast!(nodes[0], true);

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_simple_monitor_temporary_update_fail() {
	do_test_simple_monitor_temporary_update_fail(false);
	do_test_simple_monitor_temporary_update_fail(true);
}

fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
	let disconnect_flags = 8 | 16;

	// Test that we can recover from a temporary monitor update failure with some in-flight
	// HTLCs going on at the same time, potentially with some disconnection thrown in.
	// * First we route a payment, then get a temporary monitor update failure when trying to
	//   route a second payment. We then claim the first payment.
	// * If disconnect_count is set, we will disconnect at this point (which is likely, as
	//   InProgress generally indicates a network disconnect which resulted in failing to update
	//   the ChannelMonitor on a watchtower).
	// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
	//   immediately, otherwise we wait for the disconnect and deliver them via the reconnect
	//   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
	//   disconnect_count & !disconnect_flags is 0).
	// * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
	//   through message sending, potentially disconnect/reconnecting multiple times based on
	//   disconnect_count, to get the update_fulfill_htlc through.
	// * We then walk through more message exchanges to get the original update_add_htlc
	//   through, swapping message ordering based on disconnect_count & 8 and optionally
	//   disconnect/reconnecting based on disconnect_count.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Now try to send a second payment which will fail to send
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
		), false, APIError::MonitorUpdateInProgress, {});
	check_added_monitors!(nodes[0], 1);

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	// Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
	// but nodes[0] won't respond since it is frozen.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());

			if (disconnect_count & 16) == 0 {
				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
				let events_3 = nodes[0].node.get_and_clear_pending_events();
				assert_eq!(events_3.len(), 1);
				match events_3[0] {
					Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
						assert_eq!(*payment_preimage, payment_preimage_1);
						assert_eq!(*payment_hash, payment_hash_1);
					},
					_ => panic!("Unexpected event"),
				}

				nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
				check_added_monitors!(nodes[0], 1);
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			}

			(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};

	if disconnect_count & !disconnect_flags > 0 {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	}

	// Now fix monitor updating...
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	macro_rules! disconnect_reconnect_peers { () => { {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		(reestablish_1, reestablish_2, as_resp, bs_resp)
	} } }
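
	// Each as_resp/bs_resp below is the tuple produced by handle_chan_reestablish_msgs!:
	// (channel_ready, revoke_and_ack, commitment_update, RAA-vs-CS delivery order), which the
	// .0/.1/.2/.3 accesses index into.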

	let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		check_added_monitors!(nodes[0], 0);
		let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		check_added_monitors!(nodes[1], 0);
		let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		assert!(bs_resp.1.is_none());
		if (disconnect_count & 16) == 0 {
			assert!(bs_resp.2.is_none());

			assert!(as_resp.1.is_some());
			assert!(as_resp.2.is_some());
			assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
		} else {
			assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
			assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
			assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);

			assert!(as_resp.1.is_none());

			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
			let events_3 = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events_3.len(), 1);
			match events_3[0] {
				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
					assert_eq!(*payment_preimage, payment_preimage_1);
					assert_eq!(*payment_hash, payment_hash_1);
				},
				_ => panic!("Unexpected event"),
			}

			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
			let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			// No commitment_signed so get_event_msg's assert(len == 1) passes
			check_added_monitors!(nodes[0], 1);

			as_resp.1 = Some(as_resp_raa);
			bs_resp.2 = None;
		}

		if disconnect_count & !disconnect_flags > 1 {
			let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();

			if (disconnect_count & 16) == 0 {
				assert!(reestablish_1 == second_reestablish_1);
				assert!(reestablish_2 == second_reestablish_2);
			}
			assert!(as_resp == second_as_resp);
			assert!(bs_resp == second_bs_resp);
		}

		(SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
	} else {
		let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events_4.len(), 2);
		(SendEvent::from_event(events_4.remove(0)), match events_4[0] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		})
	};

	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	if disconnect_count & !disconnect_flags > 2 {
		let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

		assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
		assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

		assert!(as_resp.2.is_none());
		assert!(bs_resp.2.is_none());
	}

	let as_commitment_update;
	let bs_second_commitment_update;

	macro_rules! handle_bs_raa { () => {
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
		as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		assert!(as_commitment_update.update_add_htlcs.is_empty());
		assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(as_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[0], 1);
	} }

	macro_rules! handle_initial_raa { () => {
		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
		bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[1], 1);
	} }
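
	// The two RAAs can be delivered in either order (selected by disconnect_count & 8), so
	// their handling is wrapped in macros which assign each resulting commitment update
	// exactly once regardless of which branch runs first.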

	if (disconnect_count & 8) == 0 {
		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.is_none());

			assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	} else {
		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

			assert!(as_resp.2.is_none());
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);

			assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	}

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_5 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 1);
	match events_5[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_2, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_temporary_update_fail_a() {
	do_test_monitor_temporary_update_fail(0);
	do_test_monitor_temporary_update_fail(1);
	do_test_monitor_temporary_update_fail(2);
	do_test_monitor_temporary_update_fail(3);
	do_test_monitor_temporary_update_fail(4);
	do_test_monitor_temporary_update_fail(5);
}

#[test]
fn test_monitor_temporary_update_fail_b() {
	do_test_monitor_temporary_update_fail(2 | 8);
	do_test_monitor_temporary_update_fail(3 | 8);
	do_test_monitor_temporary_update_fail(4 | 8);
	do_test_monitor_temporary_update_fail(5 | 8);
}

#[test]
fn test_monitor_temporary_update_fail_c() {
	do_test_monitor_temporary_update_fail(1 | 16);
	do_test_monitor_temporary_update_fail(2 | 16);
	do_test_monitor_temporary_update_fail(3 | 16);
	do_test_monitor_temporary_update_fail(2 | 8 | 16);
	do_test_monitor_temporary_update_fail(3 | 8 | 16);
}

#[test]
fn test_monitor_update_fail_cs() {
	// Tests handling of a monitor update failure when processing an incoming commitment_signed
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
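	// With the monitor update stuck in-progress, nodes[1] validates the commitment_signed but
	// must hold its revoke_and_ack until the new monitor state has been persisted.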
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let responses = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(responses.len(), 2);

	match responses[0] {
		MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}
	match responses[1] {
		MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert!(updates.update_fail_htlcs.is_empty());
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());

			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(nodes[0], 1);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		},
		_ => panic!("Unexpected event"),
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}

#[test]
fn test_monitor_update_fail_no_rebroadcast() {
	// Tests handling of a monitor update failure when no message rebroadcasting on
	// channel_monitor_updated() is required. Backported from chanmon_fail_consistency
	// fuzz tests.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
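	// bs_raa is nodes[0]'s final revoke_and_ack from the dance above; the trailing arguments
	// keep it undelivered so we can hand it to nodes[1] ourselves while its monitor updating
	// is marked failed.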

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn test_monitor_update_raa_while_paused() {
	// Tests handling of an RAA while monitor updating has already been marked failed.
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	send_payment(&nodes[0], &[&nodes[1]], 5000000);
	let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);

	nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
		RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

	let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);

	nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
		RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
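	// The two queued InProgress values cover the two monitor updates nodes[0] generates below:
	// one when handling the commitment_signed and one when handling the RAA.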
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
	check_added_monitors!(nodes[1], 1);
	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[0], 1);
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
}

fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
	// Tests handling of a monitor update failure when processing an incoming RAA
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Rebalance a bit so that we can send backwards from 2 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	// Route a first payment that we'll fail backwards
	let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
	nodes[2].node.fail_htlc_backwards(&payment_hash_1);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
	check_added_monitors!(nodes[2], 1);

	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);

	let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
	check_added_monitors!(nodes[0], 0);

	// While the second channel is AwaitingRAA, forward a second payment to get it into the
	// holding cell.
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	nodes[0].node.send_payment_with_route(&route, payment_hash_2,
		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now fail monitor updating.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);
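
	// The RAA from nodes[2] completed the fail-back of the first HTLC, but with its monitor
	// update stuck in-progress nodes[1] can neither respond to nodes[2] nor fail the HTLC
	// back to nodes[0] yet.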

	// Forward a third payment which will also be added to the holding cell, despite the channel
	// being paused waiting on a monitor update.
	let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	nodes[0].node.send_payment_with_route(&route, payment_hash_3,
		RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
	send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
	check_added_monitors!(nodes[1], 0);

	// Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
	// and not forwarded.
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
		let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
		nodes[2].node.send_payment_with_route(&route, payment_hash_4,
			RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
		check_added_monitors!(nodes[2], 1);

		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
		check_added_monitors!(nodes[1], 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		(Some(payment_preimage_4), Some(payment_hash_4))
	} else { (None, None) };

	// Restore monitor updating, ensuring we immediately get a fail-back update and an
	// update_add update.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	check_added_monitors!(nodes[1], 1);

	let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
	if test_ignore_second_cs {
		assert_eq!(events_3.len(), 3);
	} else {
		assert_eq!(events_3.len(), 2);
	}

	// Note that the ordering of the events for different nodes is non-prescriptive, though the
	// ordering of the two events that both go to nodes[2] has to stay in the same order.
	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
	let messages_a = match nodes_0_event {
		MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert_eq!(updates.update_fail_htlcs.len(), 1);
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			(updates.update_fail_htlcs.remove(0), updates.commitment_signed)
		},
		_ => panic!("Unexpected event type!"),
	};

	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
	let send_event_b = SendEvent::from_event(nodes_2_event);
	assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());

	let raa = if test_ignore_second_cs {
		let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
		match nodes_2_event {
			MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
				assert_eq!(node_id, nodes[2].node.get_our_node_id());
				Some(msg)
			},
			_ => panic!("Unexpected event"),
		}
	} else { None };

	// Now deliver the new messages...

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
	commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
	expect_payment_failed!(nodes[0], payment_hash_1, true);

	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
	let as_cs;
	if test_ignore_second_cs {
		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
		check_added_monitors!(nodes[2], 1);
		let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
		check_added_monitors!(nodes[2], 1);
		let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
		assert!(bs_cs.update_add_htlcs.is_empty());
		assert!(bs_cs.update_fail_htlcs.is_empty());
		assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
		assert!(bs_cs.update_fulfill_htlcs.is_empty());
		assert!(bs_cs.update_fee.is_none());

		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
		check_added_monitors!(nodes[1], 1);
		as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
		check_added_monitors!(nodes[1], 1);
	} else {
		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
		check_added_monitors!(nodes[2], 1);

		let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
		// As both messages are for nodes[1], they're in order.
		assert_eq!(bs_revoke_and_commit.len(), 2);
		match bs_revoke_and_commit[0] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
				check_added_monitors!(nodes[1], 1);
			},
			_ => panic!("Unexpected event"),
		}

		as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

		match bs_revoke_and_commit[1] {
			MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				assert!(updates.update_add_htlcs.is_empty());
				assert!(updates.update_fail_htlcs.is_empty());
				assert!(updates.update_fail_malformed_htlcs.is_empty());
				assert!(updates.update_fulfill_htlcs.is_empty());
				assert!(updates.update_fee.is_none());
				nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
				check_added_monitors!(nodes[1], 1);
			},
			_ => panic!("Unexpected event"),
		}
	}

	assert_eq!(as_cs.update_add_htlcs.len(), 1);
	assert!(as_cs.update_fail_htlcs.is_empty());
	assert!(as_cs.update_fail_malformed_htlcs.is_empty());
	assert!(as_cs.update_fulfill_htlcs.is_empty());
	assert!(as_cs.update_fee.is_none());
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
	check_added_monitors!(nodes[2], 1);
	let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);
	let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[2], 1);
	assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[2]);

	let events_6 = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events_6.len(), 2);
	match events_6[0] {
		Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
		_ => panic!("Unexpected event"),
	}
	match events_6[1] {
		Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
		_ => panic!("Unexpected event"),
	}

	if test_ignore_second_cs {
		expect_pending_htlcs_forwardable!(nodes[1]);
		check_added_monitors!(nodes[1], 1);

		send_event = SendEvent::from_node(&nodes[1]);
		assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
		assert_eq!(send_event.msgs.len(), 1);
		nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);

		expect_pending_htlcs_forwardable!(nodes[0]);

		let events_9 = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events_9.len(), 1);
		match events_9[0] {
			Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
			_ => panic!("Unexpected event"),
		}
		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
	}

	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
}

#[test]
fn test_monitor_update_fail_raa() {
	do_test_monitor_update_fail_raa(false);
	do_test_monitor_update_fail_raa(true);
}

#[test]
fn test_monitor_update_fail_reestablish() {
	// Simple test for message retransmission after monitor update failure on
	// channel_reestablish generating a monitor update (which comes from freeing holding cell
	// HTLCs).
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[2].node.claim_funds(payment_preimage);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);

	let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
	assert_eq!(
		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
	assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
1186 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
assert_eq!(
get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1191 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1192 check_added_monitors!(nodes[1], 0);
assert_eq!(
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
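// Let future monitor updates succeed, then complete the pending one. This releases the
// held holding-cell update (the fulfill we received from nodes[2]) towards nodes[0].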
1197 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1198 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1199 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1200 check_added_monitors!(nodes[1], 0);
1202 updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1203 assert!(updates.update_add_htlcs.is_empty());
1204 assert!(updates.update_fail_htlcs.is_empty());
1205 assert!(updates.update_fail_malformed_htlcs.is_empty());
1206 assert!(updates.update_fee.is_none());
1207 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1208 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1209 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1210 expect_payment_sent!(nodes[0], payment_preimage);
}

#[test]
fn raa_no_response_awaiting_raa_state() {
1215 // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1216 // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1217 // in question (assuming it intends to respond with a CS after monitor updating is restored).
1218 // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1219 let chanmon_cfgs = create_chanmon_cfgs(2);
1220 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1221 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1222 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1223 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1225 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1226 let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1227 let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
1229 // Queue up two payments - one will be delivered right away, one immediately goes into the
1230 // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
// immediately after a CS. By failing the monitor update triggered by the CS (which
1232 // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1233 // generation during RAA while in monitor-update-failed state.
1235 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1236 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1237 check_added_monitors!(nodes[0], 1);
1238 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1239 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1240 check_added_monitors!(nodes[0], 0);
1243 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1244 assert_eq!(events.len(), 1);
1245 let payment_event = SendEvent::from_event(events.pop().unwrap());
1246 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1247 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1248 check_added_monitors!(nodes[1], 1);
1250 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1251 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1252 check_added_monitors!(nodes[0], 1);
1253 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1254 assert_eq!(events.len(), 1);
1255 let payment_event = SendEvent::from_event(events.pop().unwrap());
1257 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1258 check_added_monitors!(nodes[0], 1);
1259 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1261 // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1262 // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1263 // then restore channel monitor updates.
1264 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1265 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
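// set_update_ret queues return values, so two InProgress results are needed here: the
// commitment_signed and the RAA delivered below each trigger their own monitor update.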
1266 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1267 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1268 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1269 check_added_monitors!(nodes[1], 1);
1270 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1272 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1273 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1274 check_added_monitors!(nodes[1], 1);
1276 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1277 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1278 // nodes[1] should be AwaitingRAA here!
1279 check_added_monitors!(nodes[1], 0);
1280 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1281 expect_pending_htlcs_forwardable!(nodes[1]);
1282 expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1284 // We send a third payment here, which is somewhat of a redundant test, but the
1285 // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1286 // commitment transaction states) whereas here we can explicitly check for it.
1288 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
1289 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1290 check_added_monitors!(nodes[0], 0);
1291 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1293 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1294 check_added_monitors!(nodes[0], 1);
1295 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1296 assert_eq!(events.len(), 1);
1297 let payment_event = SendEvent::from_event(events.pop().unwrap());
1299 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1300 check_added_monitors!(nodes[0], 1);
1301 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1303 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1304 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1305 check_added_monitors!(nodes[1], 1);
1306 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1308 // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1309 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1310 check_added_monitors!(nodes[1], 1);
1311 expect_pending_htlcs_forwardable!(nodes[1]);
1312 expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1313 let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1315 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1316 check_added_monitors!(nodes[0], 1);
1318 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1319 check_added_monitors!(nodes[0], 1);
1320 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1322 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1323 check_added_monitors!(nodes[1], 1);
1324 expect_pending_htlcs_forwardable!(nodes[1]);
1325 expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
1327 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1328 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1329 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
}

#[test]
fn claim_while_disconnected_monitor_update_fail() {
1334 // Test for claiming a payment while disconnected and then having the resulting
1335 // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1336 // contrived case for nodes with network instability.
1337 // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1338 // code introduced a regression in this test (specifically, this caught a removal of the
// channel_reestablish handling ensuring the message order was sensible given the messages used).
1340 let chanmon_cfgs = create_chanmon_cfgs(2);
1341 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1342 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1343 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1344 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1346 // Forward a payment for B to claim
1347 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1349 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1350 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1352 nodes[1].node.claim_funds(payment_preimage_1);
1353 check_added_monitors!(nodes[1], 1);
1354 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
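// With the peer disconnected, the update_fulfill_htlc generated by the claim sits in the
// holding cell until the channel_reestablish below frees it.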
1356 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
features: nodes[1].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
1359 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
1363 let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1364 let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1366 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1367 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
// update.
1371 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1373 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1374 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1375 check_added_monitors!(nodes[1], 1);
1376 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1378 // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1379 // the monitor still failed
1380 let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1382 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1383 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1384 check_added_monitors!(nodes[0], 1);
1387 let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1388 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1389 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1390 check_added_monitors!(nodes[1], 1);
1391 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Note that nodes[1] not updating the monitor here is OK - it won't take action on the new HTLC
// until we've channel_monitor_update'd and updated for the new commitment transaction.
1395 // Now un-fail the monitor, which will result in B sending its original commitment update,
1396 // receiving the commitment update from A, and the resulting commitment dances.
1397 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1398 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1399 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1400 check_added_monitors!(nodes[1], 0);
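// nodes[1] now releases both of its held messages: the fulfill-plus-commitment for the
// original claim, and the RAA responding to nodes[0]'s new commitment_signed.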
1402 let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(bs_msgs.len(), 2);
match bs_msgs[0] {
1406 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1407 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1408 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1409 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
1410 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1411 check_added_monitors!(nodes[0], 1);
1413 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1414 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1415 check_added_monitors!(nodes[1], 1);
},
_ => panic!("Unexpected event"),
}
match bs_msgs[1] {
1421 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1422 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1423 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1424 check_added_monitors!(nodes[0], 1);
},
_ => panic!("Unexpected event"),
}
1429 let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1431 let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1432 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1433 check_added_monitors!(nodes[0], 1);
1434 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1436 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1437 check_added_monitors!(nodes[1], 1);
1438 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1439 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1440 check_added_monitors!(nodes[1], 1);
1442 expect_pending_htlcs_forwardable!(nodes[1]);
1443 expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1445 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1446 check_added_monitors!(nodes[0], 1);
1447 expect_payment_path_successful!(nodes[0]);
1449 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn monitor_failed_no_reestablish_response() {
1454 // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1455 // response to a commitment_signed.
1456 // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1457 // debug_assert!() failure in channel_reestablish handling.
1458 let chanmon_cfgs = create_chanmon_cfgs(2);
1459 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1460 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1461 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1462 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
{
let mut node_1_per_peer_lock;
let mut node_1_peer_state_lock;
get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
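// Pretend announcement_signatures were already exchanged in both directions so that the
// reconnection below doesn't attempt to retransmit them.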
// Route the payment and deliver the initial commitment_signed (with a monitor update failure
// on nodes[1]'s side, so it generates no response).
1476 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1478 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1479 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1480 check_added_monitors!(nodes[0], 1);
1483 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1484 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1485 assert_eq!(events.len(), 1);
1486 let payment_event = SendEvent::from_event(events.pop().unwrap());
1487 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1488 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1489 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1490 check_added_monitors!(nodes[1], 1);
1492 // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1493 // is still failing to update monitors.
1494 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1495 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1497 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
features: nodes[1].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
1500 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
1504 let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1505 let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1507 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1508 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1509 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1510 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1512 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1513 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1514 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1515 check_added_monitors!(nodes[1], 0);
1516 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1518 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1519 check_added_monitors!(nodes[0], 1);
1520 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1521 check_added_monitors!(nodes[0], 1);
1523 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1524 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1525 check_added_monitors!(nodes[1], 1);
1527 expect_pending_htlcs_forwardable!(nodes[1]);
1528 expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1530 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn first_message_on_recv_ordering() {
1535 // Test that if the initial generator of a monitor-update-frozen state doesn't generate
// messages, we're willing to flip the order of response messages if necessary in response to
1537 // a commitment_signed which needs to send an RAA first.
1538 // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1539 // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1540 // response. To do this, we start routing two payments, with the final RAA for the first being
1541 // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1542 // have no pending response but will want to send a RAA/CS (with the updates for the second
1543 // payment applied).
1544 // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1545 let chanmon_cfgs = create_chanmon_cfgs(2);
1546 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1547 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1548 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1549 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1551 // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1552 // can deliver it and fail the monitor update.
1553 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1555 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1556 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1557 check_added_monitors!(nodes[0], 1);
1560 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1561 assert_eq!(events.len(), 1);
1562 let payment_event = SendEvent::from_event(events.pop().unwrap());
1563 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1564 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1565 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1566 check_added_monitors!(nodes[1], 1);
1567 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1569 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1570 check_added_monitors!(nodes[0], 1);
1571 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1572 check_added_monitors!(nodes[0], 1);
1574 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1576 // Route the second payment, generating an update_add_htlc/commitment_signed
1577 let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1579 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1580 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1581 check_added_monitors!(nodes[0], 1);
1583 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1584 assert_eq!(events.len(), 1);
1585 let payment_event = SendEvent::from_event(events.pop().unwrap());
1586 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1588 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1590 // Deliver the final RAA for the first payment, which does not require a response. RAAs
1591 // generally require a commitment_signed, so the fact that we're expecting an opposite response
1592 // to the next message also tests resetting the delivery order.
1593 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1594 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1595 check_added_monitors!(nodes[1], 1);
1597 // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1598 // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1599 // appropriate HTLC acceptance).
1600 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1601 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1602 check_added_monitors!(nodes[1], 1);
1603 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1605 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1606 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1607 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1608 check_added_monitors!(nodes[1], 0);
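// Completing the monitor update releases everything at once: the first payment becomes
// claimable, and nodes[1] responds to the second commitment_signed with the RAA + CS pair.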
1610 expect_pending_htlcs_forwardable!(nodes[1]);
1611 expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1613 let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1614 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1615 check_added_monitors!(nodes[0], 1);
1616 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1617 check_added_monitors!(nodes[0], 1);
1619 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1620 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1621 check_added_monitors!(nodes[1], 1);
1623 expect_pending_htlcs_forwardable!(nodes[1]);
1624 expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1626 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1627 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_update_fail_claim() {
1632 // Basic test for monitor update failures when processing claim_funds calls.
1633 // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1634 // update to claim the payment. We then send two payments C->B->A, which are held at B.
1635 // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1636 // the payments from C onwards to A.
1637 let chanmon_cfgs = create_chanmon_cfgs(3);
1638 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1639 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1640 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1641 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1642 create_announced_chan_between_nodes(&nodes, 1, 2);
// Rebalance a bit so that we can send backwards from nodes[2] through nodes[1] to nodes[0].
1645 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1647 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1649 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1650 nodes[1].node.claim_funds(payment_preimage_1);
1651 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1652 check_added_monitors!(nodes[1], 1);
1654 // Note that at this point there is a pending commitment transaction update for A being held by
1655 // B. Even when we go to send the payment from C through B to A, B will not update this
1656 // already-signed commitment transaction and will instead wait for it to resolve before
1657 // forwarding the payment onwards.
1659 let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
1661 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1662 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1663 check_added_monitors!(nodes[2], 1);
1666 // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
// paused, so the forward shouldn't succeed until we call channel_monitor_updated().
1668 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1670 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1671 assert_eq!(events.len(), 1);
1672 let payment_event = SendEvent::from_event(events.pop().unwrap());
1673 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1674 let events = nodes[1].node.get_and_clear_pending_msg_events();
1675 assert_eq!(events.len(), 0);
1676 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1677 expect_pending_htlcs_forwardable_ignore!(nodes[1]);
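// Note the _ignore variant: we don't process the pending forwards yet, so this HTLC and the
// one added below can be batched into a single commitment update towards nodes[0].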
1679 let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1680 nodes[2].node.send_payment_with_route(&route, payment_hash_3,
1681 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1682 check_added_monitors!(nodes[2], 1);
1684 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1685 assert_eq!(events.len(), 1);
1686 let payment_event = SendEvent::from_event(events.pop().unwrap());
1687 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1688 let events = nodes[1].node.get_and_clear_pending_msg_events();
1689 assert_eq!(events.len(), 0);
1690 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1692 // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1693 let channel_id = chan_1.2;
1694 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1695 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1696 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1697 check_added_monitors!(nodes[1], 0);
1699 let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1700 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1701 commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1702 expect_payment_sent!(nodes[0], payment_preimage_1);
1704 // Get the payment forwards, note that they were batched into one commitment update.
1705 nodes[1].node.process_pending_htlc_forwards();
1706 check_added_monitors!(nodes[1], 1);
1707 let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1708 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1709 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1710 commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1711 expect_pending_htlcs_forwardable!(nodes[0]);
1713 let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
assert_eq!(payment_hash_2, *payment_hash);
assert_eq!(1_000_000, amount_msat);
assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
assert_eq!(via_user_channel_id, Some(42));
match purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
_ => panic!("expected PaymentPurpose::InvoicePayment")
}
},
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
assert_eq!(payment_hash_3, *payment_hash);
assert_eq!(1_000_000, amount_msat);
assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_3, *payment_secret);
},
_ => panic!("expected PaymentPurpose::InvoicePayment")
}
},
_ => panic!("Unexpected event"),
}
}

#[test]
fn test_monitor_update_on_pending_forwards() {
1752 // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1753 // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1754 // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1755 // from C to A will be pending a forward to A.
1756 let chanmon_cfgs = create_chanmon_cfgs(3);
1757 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1758 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1759 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1760 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1761 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
// Rebalance a bit so that we can send backwards from nodes[2] through nodes[1] to nodes[0].
1764 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1766 let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1767 nodes[2].node.fail_htlc_backwards(&payment_hash_1);
1768 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
1769 check_added_monitors!(nodes[2], 1);
1771 let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1772 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1773 commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1774 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1776 let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
1778 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1779 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1780 check_added_monitors!(nodes[2], 1);
1783 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1784 assert_eq!(events.len(), 1);
1785 let payment_event = SendEvent::from_event(events.pop().unwrap());
1786 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1787 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
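// Fail the monitor update on the 0<->1 channel while nodes[1] processes its pending
// fail-back and forward, holding both back until the update completes.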
1789 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1790 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1791 check_added_monitors!(nodes[1], 1);
1793 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1794 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1795 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1796 check_added_monitors!(nodes[1], 0);
1798 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1799 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1800 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1801 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1803 let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);
if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
assert_eq!(payment_hash, payment_hash_1);
assert!(payment_failed_permanently);
} else { panic!("Unexpected event!"); }
match events[2] {
Event::PaymentFailed { payment_hash, .. } => {
assert_eq!(payment_hash, payment_hash_1);
},
_ => panic!("Unexpected event"),
}
match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
}
1819 nodes[0].node.process_pending_htlc_forwards();
1820 expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1822 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
}

#[test]
fn monitor_update_claim_fail_no_response() {
1827 // Test for claim_funds resulting in both a monitor update failure and no message response (due
1828 // to channel being AwaitingRAA).
// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
// code was broken.
1831 let chanmon_cfgs = create_chanmon_cfgs(2);
1832 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1833 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1834 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1835 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1837 // Forward a payment for B to claim
1838 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1840 // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1841 let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1843 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1844 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1845 check_added_monitors!(nodes[0], 1);
1848 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1849 assert_eq!(events.len(), 1);
1850 let payment_event = SendEvent::from_event(events.pop().unwrap());
1851 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1852 let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
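// The extra commitment_signed_dance! arguments leave nodes[0]'s final RAA undelivered and
// return it to us, keeping nodes[1] in AwaitingRAA.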
1854 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1855 nodes[1].node.claim_funds(payment_preimage_1);
1856 check_added_monitors!(nodes[1], 1);
1858 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1860 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1861 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1862 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1863 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1864 check_added_monitors!(nodes[1], 0);
1865 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1867 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1868 check_added_monitors!(nodes[1], 1);
1869 expect_pending_htlcs_forwardable!(nodes[1]);
1870 expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1872 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1873 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1874 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1875 expect_payment_sent!(nodes[0], payment_preimage_1);
1877 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

// restore_b_before_conf has no meaning if !confirm_a_first
1881 // restore_b_before_lock has no meaning if confirm_a_first
1882 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1883 // Test that if the monitor update generated by funding_transaction_generated fails we continue
1884 // the channel setup happily after the update is restored.
1885 let chanmon_cfgs = create_chanmon_cfgs(2);
1886 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1887 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1888 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1890 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1891 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1892 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1894 let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1896 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1897 check_added_monitors!(nodes[0], 0);
1899 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1900 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1901 let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1902 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1903 check_added_monitors!(nodes[1], 1);
1905 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1906 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1907 check_added_monitors!(nodes[0], 1);
1908 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1909 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1910 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1911 let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1912 nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1913 check_added_monitors!(nodes[0], 0);
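// With the funding_signed monitor update completed, nodes[0] can broadcast the funding
// transaction and surface its ChannelPending event.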
1914 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
1916 let events = nodes[0].node.get_and_clear_pending_events();
1917 assert_eq!(events.len(), 0);
1918 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1919 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1921 if confirm_a_first {
1922 confirm_transaction(&nodes[0], &funding_tx);
1923 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1924 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1925 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
} else {
assert!(!restore_b_before_conf);
1928 confirm_transaction(&nodes[1], &funding_tx);
1929 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}

// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1933 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1934 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1935 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
1936 reconnect_args.send_channel_ready.1 = confirm_a_first;
1937 reconnect_nodes(reconnect_args);
1939 // But we want to re-emit ChannelPending
1940 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
1941 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1942 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1944 if !restore_b_before_conf {
1945 confirm_transaction(&nodes[1], &funding_tx);
1946 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1947 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
if !confirm_a_first && !restore_b_before_lock {
1950 confirm_transaction(&nodes[0], &funding_tx);
1951 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1952 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1953 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1957 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1958 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1959 check_added_monitors!(nodes[1], 0);
1961 let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1962 if !restore_b_before_lock {
1963 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1964 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
} else {
nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1967 confirm_transaction(&nodes[0], &funding_tx);
1968 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1969 (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
}
} else {
if restore_b_before_conf {
1973 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1974 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1975 confirm_transaction(&nodes[1], &funding_tx);
}
let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
(channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
};
1980 for node in nodes.iter() {
1981 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1982 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1983 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
}

if !restore_b_before_lock {
1987 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
} else {
expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
1993 send_payment(&nodes[0], &[&nodes[1]], 8000000);
1994 close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1995 check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1996 check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn during_funding_monitor_fail() {
2001 do_during_funding_monitor_fail(true, true, false);
2002 do_during_funding_monitor_fail(true, false, false);
2003 do_during_funding_monitor_fail(false, false, false);
2004 do_during_funding_monitor_fail(false, false, true);
}

#[test]
fn test_path_paused_mpp() {
// Simple test of sending a multi-part payment where one path is currently blocked awaiting
// a monitor update.
2011 let chanmon_cfgs = create_chanmon_cfgs(4);
2012 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
2013 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
2014 let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
2016 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
2017 let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
2018 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
2019 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
2021 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
2023 // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
2024 let path = route.paths[0].clone();
2025 route.paths.push(path);
2026 route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
2027 route.paths[0].hops[0].short_channel_id = chan_1_id;
2028 route.paths[0].hops[1].short_channel_id = chan_3_id;
2029 route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
2030 route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
2031 route.paths[1].hops[1].short_channel_id = chan_4_id;
2033 // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
2034 // (for the path 0 -> 2 -> 3) fails.
2035 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2036 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
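// The persister returns these queued statuses in order: the first path's monitor update
// completes immediately, while the second path's update remains pending.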
2038 // Now check that we get the right return value, indicating that the first path succeeded but
2039 // the second got a MonitorUpdateInProgress err. This implies
2040 // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
2041 if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
) {
assert_eq!(results.len(), 2);
2045 if let Ok(()) = results[0] {} else { panic!(); }
2046 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
2047 } else { panic!(); }
2048 check_added_monitors!(nodes[0], 2);
2049 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2051 // Pass the first HTLC of the payment along to nodes[3].
2052 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2053 assert_eq!(events.len(), 1);
2054 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
2056 // And check that, after we successfully update the monitor for chan_2 we can pass the second
2057 // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
2058 let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
2059 nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2060 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2061 assert_eq!(events.len(), 1);
2062 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
2064 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}

#[test]
fn test_pending_update_fee_ack_on_reconnect() {
2069 // In early versions of our automated fee update patch, nodes did not correctly use the
2070 // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2071 // undelivered commitment_signed.
2073 // B sends A new HTLC + CS, not delivered
2074 // A sends B update_fee + CS
2075 // B receives the CS and sends RAA, previously causing B to lock in the new feerate
// A and B disconnect and reconnect
// B resends the initial CS, using the original fee
2079 let chanmon_cfgs = create_chanmon_cfgs(2);
2080 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2081 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2082 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2084 create_announced_chan_between_nodes(&nodes, 0, 1);
2085 send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
2087 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2088 nodes[1].node.send_payment_with_route(&route, payment_hash,
2089 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
2090 check_added_monitors!(nodes[1], 1);
2091 let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2092 // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
{
let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
*feerate_lock += 20;
}
nodes[0].node.timer_tick_occurred();
2099 check_added_monitors!(nodes[0], 1);
2100 let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2101 assert!(as_update_fee_msgs.update_fee.is_some());
2103 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2104 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2105 check_added_monitors!(nodes[1], 1);
2106 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2107 // bs_first_raa is not delivered until it is re-generated after reconnect
2109 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2110 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2112 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
features: nodes[1].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
2115 let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2116 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
2119 let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2121 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2122 let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2123 assert_eq!(bs_resend_msgs.len(), 3);
2124 if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2125 assert_eq!(*updates, bs_initial_send_msgs);
2126 } else { panic!(); }
2127 if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2128 assert_eq!(*msg, bs_first_raa);
2129 } else { panic!(); }
2130 if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
2132 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2133 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2135 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2136 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2137 check_added_monitors!(nodes[0], 1);
2138 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2139 check_added_monitors!(nodes[1], 1);
2140 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2142 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2143 check_added_monitors!(nodes[0], 1);
2144 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2145 check_added_monitors!(nodes[1], 1);
2146 let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2148 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2149 check_added_monitors!(nodes[0], 1);
2150 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2151 check_added_monitors!(nodes[0], 1);
2153 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2154 check_added_monitors!(nodes[1], 1);
2156 expect_pending_htlcs_forwardable!(nodes[0]);
2157 expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2159 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
}

#[test]
fn test_fail_htlc_on_broadcast_after_claim() {
2164 // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
2165 // channel backwards if we received an HTLC failure after a HTLC fulfillment. Here we test a
2166 // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2167 // HTLC was not included in a confirmed commitment transaction.
2169 // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2170 // channel immediately before commitment occurs. After the commitment transaction reaches
2171 // ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
2172 let chanmon_cfgs = create_chanmon_cfgs(3);
2173 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2174 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2175 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2177 create_announced_chan_between_nodes(&nodes, 0, 1);
2178 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2180 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2182 let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2183 assert_eq!(bs_txn.len(), 1);
2185 nodes[2].node.claim_funds(payment_preimage);
2186 check_added_monitors!(nodes[2], 1);
2187 expect_payment_claimed!(nodes[2], payment_hash, 2000);
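// B forwards the fulfill to A as soon as it receives it, without waiting for the B<->C
// commitment dance to complete.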
2189 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2190 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2191 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2192 check_added_monitors!(nodes[1], 1);
2193 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2195 mine_transaction(&nodes[1], &bs_txn[0]);
2196 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2197 check_closed_broadcast!(nodes[1], true);
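// After ANTI_REORG_DELAY confirmations, the ChannelMonitor for the closed channel reports the
// dust HTLC as failed and B processes the back-failure, even though the claim was already
// forwarded to A above.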
2198 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2199 check_added_monitors!(nodes[1], 1);
2200 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2202 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2203 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2204 commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2205 expect_payment_path_successful!(nodes[0]);
2206 }
2208 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2209 // In early versions we did not handle resending of update_fee on reconnect correctly. The
2210 // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2211 // here to be explicit.
2212 let chanmon_cfgs = create_chanmon_cfgs(2);
2213 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2214 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2215 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2217 create_announced_chan_between_nodes(&nodes, 0, 1);
2218 send_payment(&nodes[0], &[&nodes[1]], 1000);
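// Bump the feerate so that the next timer tick generates an update_fee on the channel.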
2220 {
2221 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2222 *feerate_lock += 20;
2223 }
2224 nodes[0].node.timer_tick_occurred();
2225 check_added_monitors!(nodes[0], 1);
2226 let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2227 assert!(update_msgs.update_fee.is_some());
2229 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2232 if parallel_updates {
2233 {
2234 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2235 *feerate_lock += 20;
2236 }
2237 nodes[0].node.timer_tick_occurred();
2238 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2239 }
2241 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2242 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2244 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2245 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2246 }, true).unwrap();
2247 let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2248 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2249 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2250 }, false).unwrap();
2251 let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2253 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2254 get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2255 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
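// B had nothing outstanding to re-send, so A must re-transmit the lost update_fee (plus its
// commitment_signed) after the reestablish handshake.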
2257 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2258 let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2259 assert_eq!(as_reconnect_msgs.len(), 2);
2260 if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2261 let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2262 { updates } else { panic!(); };
2263 assert!(update_msgs.update_fee.is_some());
2264 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2265 if parallel_updates {
2266 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2267 check_added_monitors!(nodes[1], 1);
2268 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2269 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2270 check_added_monitors!(nodes[0], 1);
2271 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2273 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2274 check_added_monitors!(nodes[0], 1);
2275 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2277 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2278 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2279 check_added_monitors!(nodes[1], 1);
2280 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2282 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2283 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2284 check_added_monitors!(nodes[1], 1);
2286 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2287 check_added_monitors!(nodes[0], 1);
2289 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2290 check_added_monitors!(nodes[0], 1);
2291 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2293 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2294 check_added_monitors!(nodes[1], 1);
2295 } else {
2296 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2297 }
2299 send_payment(&nodes[0], &[&nodes[1]], 1000);
2300 }
2301 #[test]
2302 fn update_fee_resend_test() {
2303 do_update_fee_resend_test(false, false);
2304 do_update_fee_resend_test(true, false);
2305 do_update_fee_resend_test(false, true);
2306 do_update_fee_resend_test(true, true);
2307 }
2309 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2310 // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2311 // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2312 // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2313 // which failed in such a case).
2314 let chanmon_cfgs = create_chanmon_cfgs(2);
2315 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2316 let persister;
2317 let new_chain_monitor;
2318 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2319 let nodes_0_deserialized;
2320 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2322 let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
2323 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2324 let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2326 // Do a really complicated dance to get an HTLC into the holding cell, with
2327 // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2328 // attempts to send an HTLC while MonitorUpdateInProgress is set are immediately
2329 // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2330 // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2331 // flags.
2332 //
2333 // We do this by:
2334 // a) routing a payment from node B to node A,
2335 // b) sending a payment from node A to node B without delivering any of the generated messages,
2336 // putting node A in AwaitingRemoteRevoke,
2337 // c) sending a second payment from node A to node B, which is immediately placed in the
2338 // holding cell,
2339 // d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2340 // when we try to persist the payment preimage,
2341 // e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2342 // clearing AwaitingRemoteRevoke on node A.
2344 // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2345 // (c) will not be freed from the holding cell.
2346 let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
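// Step (b): A sends payment_1 but we hold on to all of the generated messages for now.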
2348 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
2349 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2350 check_added_monitors!(nodes[0], 1);
2351 let send = SendEvent::from_node(&nodes[0]);
2352 assert_eq!(send.msgs.len(), 1);
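// Step (c): with the first HTLC's commitment still outstanding, this second send lands in the
// holding cell and generates no monitor update.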
2354 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
2355 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2356 check_added_monitors!(nodes[0], 0);
2358 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
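// Step (d): mark the next two monitor updates as in-progress, then claim the inbound payment,
// leaving the preimage persistence pending.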
2359 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2360 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2361 nodes[0].node.claim_funds(payment_preimage_0);
2362 check_added_monitors!(nodes[0], 1);
2364 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2365 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2366 check_added_monitors!(nodes[1], 1);
2368 let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
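// Step (e): deliver B's revoke_and_ack so that AwaitingRemoteRevoke clears on node A while
// MonitorUpdateInProgress stays set.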
2370 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2371 check_added_monitors!(nodes[0], 1);
2373 if disconnect {
2374 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2375 // disconnect the peers. Note that the fuzzer originally found this issue because
2376 // deserializing a ChannelManager in this state causes an assertion failure.
2377 if reload_a {
2378 reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2379 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2380 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2381 } else {
2382 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2383 }
2384 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2386 // Now reconnect the two
2387 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2388 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2389 }, true).unwrap();
2390 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2391 assert_eq!(reestablish_1.len(), 1);
2392 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2393 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2394 }, false).unwrap();
2395 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2396 assert_eq!(reestablish_2.len(), 1);
2398 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2399 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2400 check_added_monitors!(nodes[1], 0);
2402 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2403 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2405 assert!(resp_0.0.is_none());
2406 assert!(resp_0.1.is_none());
2407 assert!(resp_0.2.is_none());
2408 assert!(resp_1.0.is_none());
2409 assert!(resp_1.1.is_none());
2411 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2412 // moment).
2413 if let Some(pending_cs) = resp_1.2 {
2414 assert!(pending_cs.update_add_htlcs.is_empty());
2415 assert!(pending_cs.update_fail_htlcs.is_empty());
2416 assert!(pending_cs.update_fulfill_htlcs.is_empty());
2417 assert_eq!(pending_cs.commitment_signed, cs);
2418 } else { panic!(); }
2420 if reload_a {
2421 // The two pending monitor updates were replayed (but are still pending).
2422 check_added_monitors(&nodes[0], 2);
2423 } else {
2424 // There should be no new monitor updates, as we are still waiting on the pending,
2425 // in-progress one.
2425 check_added_monitors(&nodes[0], 0);
2426 }
2427 check_added_monitors(&nodes[1], 0);
2428 }
2430 // If we finish updating the monitor, we should free the holding cell right away (this did
2431 // not occur prior to #756).
2432 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2433 let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2434 nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2435 expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2437 // New outbound messages should be generated immediately upon a call to
2438 // get_and_clear_pending_msg_events (but not before).
2439 check_added_monitors!(nodes[0], 0);
2440 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2441 check_added_monitors!(nodes[0], 1);
2442 assert_eq!(events.len(), 1);
2444 // Deliver the pending in-flight CS
2445 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2446 check_added_monitors!(nodes[0], 1);
2448 let commitment_msg = match events.pop().unwrap() {
2449 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2450 assert_eq!(node_id, nodes[1].node.get_our_node_id());
2451 assert!(updates.update_fail_htlcs.is_empty());
2452 assert!(updates.update_fail_malformed_htlcs.is_empty());
2453 assert!(updates.update_fee.is_none());
2454 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2455 nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2456 expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
2457 assert_eq!(updates.update_add_htlcs.len(), 1);
2458 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2459 updates.commitment_signed
2460 },
2461 _ => panic!("Unexpected event type!"),
2462 };
2464 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2465 check_added_monitors!(nodes[1], 1);
2467 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2468 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2469 expect_pending_htlcs_forwardable!(nodes[1]);
2470 expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2471 check_added_monitors!(nodes[1], 1);
2473 commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);
2475 let events = nodes[1].node.get_and_clear_pending_events();
2476 assert_eq!(events.len(), 2);
2477 match events[0] {
2478 Event::PendingHTLCsForwardable { .. } => { },
2479 _ => panic!("Unexpected event"),
2480 }
2481 match events[1] {
2482 Event::PaymentPathSuccessful { .. } => { },
2483 _ => panic!("Unexpected event"),
2484 }
2486 nodes[1].node.process_pending_htlc_forwards();
2487 expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2489 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2490 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2491 }
2492 #[test]
2493 fn channel_holding_cell_serialize() {
2494 do_channel_holding_cell_serialize(true, true);
2495 do_channel_holding_cell_serialize(true, false);
2496 do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2497 }
2499 #[derive(PartialEq)]
2500 enum HTLCStatusAtDupClaim {
2501 Received,
2502 HoldingCell,
2503 Cleared,
2504 }
2505 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2506 // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2507 // along the payment path before waiting for a full commitment_signed dance. This is great, but
2508 // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2509 // reconnects, and then has to re-send its update_fulfill_htlc message again.
2510 // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2511 // channel on which the inbound HTLC was received.
2512 let chanmon_cfgs = create_chanmon_cfgs(3);
2513 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2514 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2515 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2517 create_announced_chan_between_nodes(&nodes, 0, 1);
2518 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2520 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2522 let mut as_raa = None;
2523 if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2524 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2525 // awaiting a remote revoke_and_ack from nodes[0].
2526 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2527 nodes[0].node.send_payment_with_route(&route, second_payment_hash,
2528 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2529 check_added_monitors!(nodes[0], 1);
2531 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2532 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2533 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2534 check_added_monitors!(nodes[1], 1);
2536 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2537 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2538 check_added_monitors!(nodes[0], 1);
2539 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2540 check_added_monitors!(nodes[0], 1);
2542 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2545 let fulfill_msg = msgs::UpdateFulfillHTLC {
2546 channel_id: chan_id_2,
2547 htlc_id: 0,
2548 payment_preimage,
2549 };
2550 if second_fails {
2551 nodes[2].node.fail_htlc_backwards(&payment_hash);
2552 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2553 check_added_monitors!(nodes[2], 1);
2554 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2555 } else {
2556 nodes[2].node.claim_funds(payment_preimage);
2557 check_added_monitors!(nodes[2], 1);
2558 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2560 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2561 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2562 // Check that the message we're about to deliver matches the one generated:
2563 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2564 }
2565 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2566 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2567 check_added_monitors!(nodes[1], 1);
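// Unless the fulfill is stuck in the holding cell awaiting A's revoke_and_ack, B immediately
// relays the claim on to A.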
2569 let mut bs_updates = None;
2570 if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2571 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2572 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2573 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2574 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2575 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2576 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2577 expect_payment_path_successful!(nodes[0]);
2578 }
2579 } else {
2580 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2581 }
2583 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2584 nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2586 if second_fails {
2587 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2588 reconnect_args.pending_htlc_fails.0 = 1;
2589 reconnect_nodes(reconnect_args);
2590 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2591 } else {
2592 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2593 reconnect_args.pending_htlc_claims.0 = 1;
2594 reconnect_nodes(reconnect_args);
2595 }
2597 if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2598 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2599 check_added_monitors!(nodes[1], 1);
2600 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2602 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2603 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2604 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2605 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2606 }
2607 if htlc_status != HTLCStatusAtDupClaim::Cleared {
2608 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2609 expect_payment_path_successful!(nodes[0]);
2610 }
2611 }
2613 #[test]
2614 fn test_reconnect_dup_htlc_claims() {
2615 do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2616 do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2617 do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2618 do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2619 do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2620 do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2621 }
2623 #[test]
2624 fn test_temporary_error_during_shutdown() {
2625 // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2626 // close.
2627 let mut config = test_default_channel_config();
2628 config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2630 let chanmon_cfgs = create_chanmon_cfgs(2);
2631 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2632 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2633 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2635 let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2637 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2638 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2640 nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2641 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2642 check_added_monitors!(nodes[1], 1);
2644 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2645 check_added_monitors!(nodes[0], 1);
2647 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
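// With both shutdown-script monitor updates still in-progress, neither side will move on to
// the closing_signed negotiation.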
2649 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2650 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2652 let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2653 nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2654 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2656 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2658 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2659 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2660 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2662 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
2663 let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
2664 let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2666 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
2667 let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
2668 assert!(none_b.is_none());
2669 let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2671 assert_eq!(txn_a, txn_b);
2672 assert_eq!(txn_a.len(), 1);
2673 check_spends!(txn_a[0], funding_tx);
2674 check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
2675 check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
2676 }
2678 #[test]
2679 fn test_permanent_error_during_sending_shutdown() {
2680 // Test that permanent failures when updating the monitor's shutdown script result in a force
2681 // close when initiating a cooperative close.
2682 let mut config = test_default_channel_config();
2683 config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2685 let chanmon_cfgs = create_chanmon_cfgs(2);
2686 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2687 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
2688 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2690 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
2691 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
2693 assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
2695 // We always send the `shutdown` response when initiating a shutdown, even if we immediately
2696 // close the channel thereafter.
2697 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
2698 assert_eq!(msg_events.len(), 3);
2699 if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
2700 if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
2701 if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
2703 check_added_monitors!(nodes[0], 2);
2704 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
2705 [nodes[1].node.get_our_node_id()], 100000);
2706 }
2708 #[test]
2709 fn test_permanent_error_during_handling_shutdown() {
2710 // Test that permanent failures when updating the monitor's shutdown script result in a force
2711 // close when handling a cooperative close.
2712 let mut config = test_default_channel_config();
2713 config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2715 let chanmon_cfgs = create_chanmon_cfgs(2);
2716 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2717 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
2718 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2720 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
2721 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
2723 assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
2724 let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
2725 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
2727 // We always send the `shutdown` response when receiving a shutdown, even if we immediately
2728 // close the channel thereafter.
2729 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2730 assert_eq!(msg_events.len(), 3);
2731 if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
2732 if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
2733 if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
2735 check_added_monitors!(nodes[1], 2);
2736 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
2737 [nodes[0].node.get_our_node_id()], 100000);
2738 }
2740 #[test]
2741 fn double_temp_error() {
2742 // Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
2743 let chanmon_cfgs = create_chanmon_cfgs(2);
2744 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2745 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2746 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2748 let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
2750 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2751 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2753 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2754 // `claim_funds` results in a ChannelMonitorUpdate.
2755 nodes[1].node.claim_funds(payment_preimage_1);
2756 check_added_monitors!(nodes[1], 1);
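// The claim generated a monitor update, but it is stuck in-progress, so no update_fulfill goes
// out yet.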
2757 let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2759 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2760 // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
2761 // which had some asserts that prevented it from being called twice.
2762 nodes[1].node.claim_funds(payment_preimage_2);
2763 check_added_monitors!(nodes[1], 1);
2764 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2766 let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2767 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
2768 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2769 check_added_monitors!(nodes[1], 0);
2770 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
2772 // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
2773 // and get both PaymentClaimed events at once.
2774 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2776 let events = nodes[1].node.get_and_clear_pending_events();
2777 assert_eq!(events.len(), 2);
2778 match events[0] {
2779 Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
2780 _ => panic!("Unexpected Event: {:?}", events[0]),
2781 }
2782 match events[1] {
2783 Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
2784 _ => panic!("Unexpected Event: {:?}", events[1]),
2785 }
2787 assert_eq!(msg_events.len(), 1);
2788 let (update_fulfill_1, commitment_signed_b1, node_id) = {
2789 match &msg_events[0] {
2790 &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
2791 assert!(update_add_htlcs.is_empty());
2792 assert_eq!(update_fulfill_htlcs.len(), 1);
2793 assert!(update_fail_htlcs.is_empty());
2794 assert!(update_fail_malformed_htlcs.is_empty());
2795 assert!(update_fee.is_none());
2796 (update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
2797 },
2798 _ => panic!("Unexpected event"),
2799 }
2800 };
2801 assert_eq!(node_id, nodes[0].node.get_our_node_id());
2802 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
2803 check_added_monitors!(nodes[0], 0);
2804 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2805 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
2806 check_added_monitors!(nodes[0], 1);
2807 nodes[0].node.process_pending_htlc_forwards();
2808 let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2809 check_added_monitors!(nodes[1], 0);
2810 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2811 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
2812 check_added_monitors!(nodes[1], 1);
2813 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
2814 check_added_monitors!(nodes[1], 1);
2816 // Complete the second HTLC.
2817 let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
2818 let events = nodes[1].node.get_and_clear_pending_msg_events();
2819 assert_eq!(events.len(), 2);
2821 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2822 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
2823 assert!(updates.update_add_htlcs.is_empty());
2824 assert!(updates.update_fail_htlcs.is_empty());
2825 assert!(updates.update_fail_malformed_htlcs.is_empty());
2826 assert!(updates.update_fee.is_none());
2827 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2828 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
2830 _ => panic!("Unexpected event"),
2833 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
2834 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
2837 _ => panic!("Unexpected event"),
2840 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
2841 check_added_monitors!(nodes[0], 1);
2842 expect_payment_path_successful!(nodes[0]);
2844 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
2845 check_added_monitors!(nodes[0], 0);
2846 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2847 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
2848 expect_payment_sent!(nodes[0], payment_preimage_2);
2849 }
2851 fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
2852 // Test that if the monitor update generated in funding_signed is stored async and we restart
2853 // with the latest ChannelManager but the ChannelMonitor persistence never completed, we happily
2854 // drop the channel and move on.
2855 let chanmon_cfgs = create_chanmon_cfgs(2);
2856 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2858 let persister;
2859 let new_chain_monitor;
2861 let mut chan_config = test_default_channel_config();
2862 chan_config.manually_accept_inbound_channels = true;
2863 chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
2865 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
2866 let nodes_0_deserialized;
2868 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2870 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
2871 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
2873 let events = nodes[1].node.get_and_clear_pending_events();
2874 assert_eq!(events.len(), 1);
2875 match events[0] {
2876 Event::OpenChannelRequest { temporary_channel_id, .. } => {
2877 if use_0conf {
2878 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2879 } else {
2880 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2881 }
2882 },
2883 _ => panic!("Unexpected event"),
2884 }
2886 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
2888 let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
2890 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
2891 check_added_monitors!(nodes[0], 0);
2893 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
2894 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
2895 check_added_monitors!(nodes[1], 1);
2896 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
2898 let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
2899 assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
2900 match &bs_signed_locked[0] {
2901 MessageSendEvent::SendFundingSigned { msg, .. } => {
2902 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2904 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
2905 check_added_monitors!(nodes[0], 1);
2906 },
2907 _ => panic!("Unexpected event"),
2908 }
2909 if use_0conf {
2910 match &bs_signed_locked[1] {
2911 MessageSendEvent::SendChannelReady { msg, .. } => {
2912 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
2913 },
2914 _ => panic!("Unexpected event"),
2915 }
2916 }
2918 assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
2919 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2920 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2922 // nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
2923 // broadcast the funding transaction. If nodes[0] restarts at this point with the
2924 // ChannelMonitor lost, we should simply discard the channel.
2926 // The test framework checks that watched_txn/outputs match the monitor set, which they will
2927 // not, so we have to clear them here.
2928 nodes[0].chain_source.watched_txn.lock().unwrap().clear();
2929 nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
2931 reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
2932 check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
2933 assert!(nodes[0].node.list_channels().is_empty());
2934 }
2936 #[test]
2937 fn test_outbound_reload_without_init_mon() {
2938 do_test_outbound_reload_without_init_mon(true);
2939 do_test_outbound_reload_without_init_mon(false);
2940 }
2942 fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
2943 // Test that if the monitor update generated by funding_transaction_generated is stored async
2944 // and we restart with the latest ChannelManager but the ChannelMonitor persistence never
2945 // completed, we happily drop the channel and move on.
2946 let chanmon_cfgs = create_chanmon_cfgs(2);
2947 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2949 let persister;
2950 let new_chain_monitor;
2952 let mut chan_config = test_default_channel_config();
2953 chan_config.manually_accept_inbound_channels = true;
2954 chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
2956 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
2957 let nodes_1_deserialized;
2959 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2961 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
2962 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
2964 let events = nodes[1].node.get_and_clear_pending_events();
2965 assert_eq!(events.len(), 1);
2966 match events[0] {
2967 Event::OpenChannelRequest { temporary_channel_id, .. } => {
2968 if use_0conf {
2969 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2970 } else {
2971 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2972 }
2973 },
2974 _ => panic!("Unexpected event"),
2975 }
2977 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
2979 let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
2981 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
2982 check_added_monitors!(nodes[0], 0);
2984 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
2985 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2986 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
2987 check_added_monitors!(nodes[1], 1);
2989 // nodes[1] happily sends its funding_signed even though it's awaiting the persistence of the
2990 // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
2991 // transaction is confirmed.
2992 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
2994 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
2995 check_added_monitors!(nodes[0], 1);
2996 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
2998 let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2999 if lock_commitment {
3000 confirm_transaction(&nodes[0], &as_funding_tx[0]);
3001 confirm_transaction(&nodes[1], &as_funding_tx[0]);
3002 }
3003 if use_0conf || lock_commitment {
3004 let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
3005 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
3006 }
3007 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3009 // nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
3010 // move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
3011 // restarts at this point with the ChannelMonitor lost, we should simply discard the channel.
3013 // The test framework checks that watched_txn/outputs match the monitor set, which they will
3014 // not, so we have to clear them here.
3015 nodes[1].chain_source.watched_txn.lock().unwrap().clear();
3016 nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
3018 reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
3020 check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
3021 assert!(nodes[1].node.list_channels().is_empty());
3022 }
3024 #[test]
3025 fn test_inbound_reload_without_init_mon() {
3026 do_test_inbound_reload_without_init_mon(true, true);
3027 do_test_inbound_reload_without_init_mon(true, false);
3028 do_test_inbound_reload_without_init_mon(false, true);
3029 do_test_inbound_reload_without_init_mon(false, false);
3030 }
3032 #[test]
3033 fn test_blocked_chan_preimage_release() {
3034 // Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
3035 // be handled, HTLC preimage `ChannelMonitorUpdate`s will still go out.
3036 let chanmon_cfgs = create_chanmon_cfgs(3);
3037 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3038 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3039 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3041 create_announced_chan_between_nodes(&nodes, 0, 1);
3042 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
3044 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
3046 // Tee up two payments in opposite directions across nodes[1], one it sent to generate a
3047 // PaymentSent event and one it forwards.
3048 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
3049 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
3051 // Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
3052 nodes[2].node.claim_funds(payment_preimage_1);
3053 check_added_monitors(&nodes[2], 1);
3054 expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
3056 let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3057 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
3058 do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
3059 check_added_monitors(&nodes[1], 0);
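// B's PaymentSent event for payment_1 is intentionally left unhandled here; it keeps the
// B<->C channel's monitor update flow blocked for the rest of the test.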
3061 // Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
3062 // claim an HTLC on its channel with nodes[2], but that channel is blocked on the above
3063 // `PaymentSent` event.
3064 nodes[0].node.claim_funds(payment_preimage_2);
3065 check_added_monitors(&nodes[0], 1);
3066 expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);
3068 let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3069 nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
3070 check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
3071 assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
3072 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3074 // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the
3075 // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the
3076 // channel!
3077 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
3078 check_added_monitors(&nodes[1], 1);
3079 let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
3080 assert!(a.is_none());
3082 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
3083 check_added_monitors(&nodes[1], 0);
3084 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3086 let events = nodes[1].node.get_and_clear_pending_events();
3087 assert_eq!(events.len(), 3);
3088 if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
3089 if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
3090 if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
3092 // The event processing should release the last RAA updates on both channels.
3093 check_added_monitors(&nodes[1], 2);
3095 // When we fetch the next message events, the getter will generate the pending update_fulfill
3096 // for nodes[2], generating a further monitor update.
3097 let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
3098 check_added_monitors(&nodes[1], 1);
3100 nodes[2].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
3101 do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
3102 expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
3103 }
3105 fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
3106 // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
3107 // from the downstream channel, we immediately claim the HTLC on the upstream channel, before
3108 // even doing a `commitment_signed` dance on the downstream channel. This implies that our
3109 // `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
3110 // money, then we write the update that resolves the downstream node claiming their money. This
3111 // is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
3112 // generated, but of course this may not be the case. For asynchronous update writes, we have
3113 // to ensure monitor updates can block each other, preventing the inversion altogether.
3114 let chanmon_cfgs = create_chanmon_cfgs(3);
3115 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3117 let persister;
3118 let new_chain_monitor;
3119 let nodes_1_deserialized;
3121 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3122 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3124 let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
3125 let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
3127 // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
3128 // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
3129 // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
3130 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
3132 let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
3133 let mut manager_b = Vec::new();
3134 if !with_latest_manager {
3135 manager_b = nodes[1].node.encode();
3136 }
3138 nodes[2].node.claim_funds(payment_preimage);
3139 check_added_monitors(&nodes[2], 1);
3140 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
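// Mark B's next monitor update as in-progress before it handles the fulfill, so the A<->B
// preimage write stays pending.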
3142 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3143 let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
3144 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
3146 // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
3147 // for it since the monitor update is marked in-progress.
3148 check_added_monitors(&nodes[1], 1);
3149 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3151 // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
3152 // won't get the preimage when the nodes reconnect and we have to get it from the
3153 // ChannelMonitor.
3154 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
3155 check_added_monitors(&nodes[1], 1);
3156 if complete_bc_commitment_dance {
3157 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
3158 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3159 check_added_monitors(&nodes[2], 1);
3160 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3161 check_added_monitors(&nodes[2], 1);
3162 let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3164 // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
3165 // preimage in the A <-> B channel, which will prevent it from persisting the
3166 // `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage.
3167 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
3168 check_added_monitors(&nodes[1], 0);
3169 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3170 }
3172 // Now reload node B
3173 if with_latest_manager {
3174 manager_b = nodes[1].node.encode();
3175 }
3177 let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
3178 reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
3180 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3181 nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3183 if with_latest_manager {
3184 // If we used the latest ChannelManager to reload from, we should have both channels still
3185 // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
3186 // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
3187 // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
3188 // complete after reconnecting to our peers.
3189 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3190 nodes[1].node.timer_tick_occurred();
3191 check_added_monitors(&nodes[1], 1);
3192 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3194 // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
3195 // the end, go ahead and do that, though the
3196 // `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
3197 // expect to *not* receive the final RAA ChannelMonitorUpdate.
3198 if complete_bc_commitment_dance {
3199 reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
3200 } else {
3201 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
3202 reconnect_args.pending_responding_commitment_signed.1 = true;
3203 reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
3204 reconnect_args.pending_raa = (false, true);
3205 reconnect_nodes(reconnect_args);
3206 }
3208 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3210 // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
3211 // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
3212 // process.
3213 let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
3214 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
3216 // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
3217 // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
3218 // channel.
3219 } else {
3220 // If the ChannelManager used in the reload was stale, check that the B <-> C channel was
3221 // closed.
3223 // Note that this will also process the ChannelMonitorUpdates which were queued up when we
3224 // reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
3225 // force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
3226 // commitment update will be allowed to go out.
3227 check_added_monitors(&nodes[1], 0);
3228 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3229 persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3230 check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
3231 check_added_monitors(&nodes[1], 2);
3233 nodes[1].node.timer_tick_occurred();
3234 check_added_monitors(&nodes[1], 0);
3236 // Don't bother to reconnect B to C - that channel has been closed. We don't need to
3237 // exchange any messages here even though there's a pending commitment update because the
3238 // ChannelMonitorUpdate hasn't yet completed.
3239 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3241 let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
3242 nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();

		// The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
		// preimage (as it was a replay of the original ChannelMonitorUpdate from before we
		// restarted). When we go to fetch the commitment transaction updates we'll poll the
		// ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
		// with the actual commitment transaction, which will allow us to fulfill the HTLC with
		// node A.
	}

	let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
	check_added_monitors(&nodes[1], 1);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
	do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);

	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);

	// Finally, check that the payment was, ultimately, seen as sent by node A.
	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}
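
// Covers all four combinations of the two flags taken by the helper above
// (`with_latest_manager`, `complete_bc_commitment_dance`).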
#[test]
fn test_inverted_mon_completion_order() {
	do_test_inverted_mon_completion_order(true, true);
	do_test_inverted_mon_completion_order(true, false);
	do_test_inverted_mon_completion_order(false, true);
	do_test_inverted_mon_completion_order(false, false);
}

fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
	// Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
	// is force-closed between when we generate the update on reload and when we go to handle the
	// update, or prior to generating the update at all.

	if !close_chans_before_reload && close_only_a {
		// If we're not closing the channels, it makes no sense to "only close A"
		panic!();
	}

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let persister;
	let new_chain_monitor;
	let nodes_1_deserialized;

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;

	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
	// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
	// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();

	nodes[2].node.claim_funds(payment_preimage);
	check_added_monitors(&nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);

	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
	// for it since the monitor update is marked in-progress.
	check_added_monitors(&nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
	// the preimage when the nodes reconnect, at which point we have to ensure we get it from the
	// ChannelMonitor.
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
	check_added_monitors(&nodes[1], 1);
	let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
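	// (B's RAA + commitment_signed responses are fetched and dropped here rather than delivered
	// to C, deliberately leaving the dance incomplete until the nodes reconnect.)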

	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();

	if close_chans_before_reload {
		if !close_only_a {
			chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
			nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
			check_closed_broadcast(&nodes[1], 1, true);
			check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
		}

		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_broadcast(&nodes[1], 1, true);
		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
	}

	// Now reload node B.
	let manager_b = nodes[1].node.encode();
	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
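	// Note that `mon_ab` was encoded before B learned the payment preimage, so the A<->B
	// ChannelMonitor we reload with is stale and the preimage ChannelMonitorUpdate must be
	// re-applied after startup (via the timer tick below).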

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	if close_chans_before_reload {
		// If the channels were already closed, B will rebroadcast its closing transactions here.
		let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		if close_only_a {
			assert_eq!(bs_close_txn.len(), 2);
		} else {
			assert_eq!(bs_close_txn.len(), 3);
		}
	}

	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
	let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(as_closing_tx.len(), 1);

	// In order to give A's closing transaction to B without processing background events first,
	// use the _without_consistency_checks utility method. This is similar to connecting blocks
	// during startup, prior to the node being fully initialized.
	mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);

	// After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
	// ChannelMonitor (possibly twice), even though the channel has since been closed.
	check_added_monitors(&nodes[1], 0);
	let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
	if hold_post_reload_mon_update {
		for _ in 0..mons_added {
			persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		}
	}
	nodes[1].node.timer_tick_occurred();
	check_added_monitors(&nodes[1], mons_added);

	// Finally, check that B created a payment preimage transaction and close out the payment.
	let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
	let bs_preimage_tx = &bs_txn[0];
	check_spends!(bs_preimage_tx, as_closing_tx[0]);
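	// (`check_spends!` asserts that every input of `bs_preimage_tx` spends an output of A's
	// closing transaction, i.e. B claimed the HTLC on-chain using the preimage.)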

	if !close_chans_before_reload {
		check_closed_broadcast(&nodes[1], 1, true);
		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
	}

	// While we forwarded the payment a while ago, we don't want to process events too early or
	// we'll run background tasks we wanted to test individually.
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);

	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
	check_closed_broadcast(&nodes[0], 1, true);
	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);

	if !close_chans_before_reload || close_only_a {
		// Make sure the B<->C channel is still alive and well by sending a payment over it.
		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
		reconnect_args.pending_responding_commitment_signed.1 = true;
		if !close_chans_before_reload {
			// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
			// will consider the forwarded payment complete and allow the B<->C
			// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
			// be allowed, and needs fixing.
			reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
		}
		reconnect_args.pending_raa.1 = true;

		reconnect_nodes(reconnect_args);
		let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
		nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
		if !close_chans_before_reload {
			// Once we call `process_pending_events` the final `ChannelMonitorUpdate` for the
			// B<->C channel will fly, removing the payment preimage from it.
			check_added_monitors(&nodes[1], 1);
		}
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
		send_payment(&nodes[1], &[&nodes[2]], 100_000);
	}
}
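
// Covers the valid combinations of (close_chans_before_reload, close_only_a,
// hold_post_reload_mon_update); the (false, true, _) cases are omitted since the helper
// panics on that nonsensical combination.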
#[test]
fn test_durable_preimages_on_closed_channel() {
	do_test_durable_preimages_on_closed_channel(true, true, true);
	do_test_durable_preimages_on_closed_channel(true, true, false);
	do_test_durable_preimages_on_closed_channel(true, false, true);
	do_test_durable_preimages_on_closed_channel(true, false, false);
	do_test_durable_preimages_on_closed_channel(false, false, true);
	do_test_durable_preimages_on_closed_channel(false, false, false);
}

fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
	// Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
	// before restart we run the monitor update completion action on startup.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let persister;
	let new_chain_monitor;
	let nodes_1_deserialized;

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;

	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
	// `update_fulfill_htlc`+`commitment_signed` we have a monitor update for both of B's channels.
	// We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
	// update pending, then reload B. At that point, the final monitor update on the B<->C channel
	// is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

	nodes[2].node.claim_funds(payment_preimage);
	check_added_monitors(&nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);

	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
	// for it since the monitor update is marked in-progress.
	check_added_monitors(&nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now step the Commitment Signed Dance between B and C and check that after the final RAA B
	// doesn't let the preimage-removing monitor update fly.
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
	check_added_monitors(&nodes[1], 1);
	let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors(&nodes[2], 1);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
	check_added_monitors(&nodes[2], 1);

	let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_final_raa);
	check_added_monitors(&nodes[1], 0);

	// Finally, reload node B and check that after we call `process_pending_events` once we realize
	// we've completed the A<->B preimage-including monitor update and so can release the B<->C
	// preimage-removing monitor update.
	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
	let manager_b = nodes[1].node.encode();
	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
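	// Note: the A<->B monitor serialized above already contains the preimage (monitor updates
	// are applied in memory even while their persistence is in-flight), so the reloaded node
	// sees that update as complete - what never ran is the manager-side completion action,
	// which must be replayed on startup.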

	if close_during_reload {
		// Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
		// (as learned about during the on-reload block connection).
		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
		check_added_monitors!(nodes[0], 1);
		check_closed_broadcast!(nodes[0], true);
		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
		let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
	}

	let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
	let mut events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
	expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
	if close_during_reload {
		match events[0] {
			Event::ChannelClosed { .. } => {},
			_ => panic!(),
		}
		check_closed_broadcast!(nodes[1], true);
	}

	// Once we run event processing the monitor update should be freed; check that it was indeed
	// the B<->C channel which was updated.
	check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
	let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
	assert!(bc_update_id != post_ev_bc_update_id);

	// Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
	// fine.
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
	send_payment(&nodes[1], &[&nodes[2]], 100_000);
}
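
// Runs the helper both with and without the A<->B channel being force-closed during reload.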
#[test]
fn test_reload_mon_update_completion_actions() {
	do_test_reload_mon_update_completion_actions(true);
	do_test_reload_mon_update_completion_actions(false);
}