X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Freload_tests.rs;h=bf2fb1de1e30d4a9ff3bf1fa5498d8f37fbbf5f8;hb=e23102f5655c87896f01c721ea5342fbdb81fae9;hp=2ca3f21fe940270dc16438cd79a157c529e2f7a8;hpb=8ecd7c30c903b9e2db0c280110200f7c61565c11;p=rust-lightning diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 2ca3f21f..bf2fb1de 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -12,17 +12,18 @@ use crate::chain::{ChannelMonitorUpdateStatus, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::ChannelMonitor; -use crate::chain::keysinterface::EntropySource; +use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; -use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId}; +use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use crate::util::enforcing_trait_impls::EnforcingSigner; use crate::util::test_utils; use crate::util::errors::APIError; -use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::config::UserConfig; +use crate::util::string::UntrustedString; use bitcoin::hash_types::BlockHash; @@ -44,8 +45,8 @@ fn test_funding_peer_disconnect() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); confirm_transaction(&nodes[0], &tx); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -53,16 +54,20 @@ fn test_funding_peer_disconnect() { reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); confirm_transaction(&nodes[1], &tx); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert!(events_2.is_empty()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, 
false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect. @@ -140,7 +145,7 @@ fn test_funding_peer_disconnect() { assert_eq!(events_7.len(), 1); let (chan_announcement, as_update) = match events_7[0] { MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { - (msg.clone(), update_msg.clone()) + (msg.clone(), update_msg.clone().unwrap()) }, _ => panic!("Unexpected event {:?}", events_7[0]), }; @@ -153,7 +158,7 @@ fn test_funding_peer_disconnect() { let bs_update = match events_8[0] { MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { assert_eq!(*msg, chan_announcement); - update_msg.clone() + update_msg.clone().unwrap() }, _ => panic!("Unexpected event {:?}", events_8[0]), }; @@ -169,7 +174,7 @@ fn test_funding_peer_disconnect() { // Check that after deserialization and reconnection we can still generate an identical // channel_announcement from the cached signatures. - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); @@ -190,15 +195,19 @@ fn test_no_txn_manager_serialize_deserialize() { let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); let chan_0_monitor_serialized = get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode(); reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); @@ -260,6 +269,9 @@ fn test_manager_serialize_deserialize_events() { } // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead + expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id()); + expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id()); + nodes.push(node_a); nodes.push(node_b); @@ -267,7 +279,7 @@ fn test_manager_serialize_deserialize_events() { let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode(); reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + 
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); // After deserializing, make sure the funding_transaction is still held by the channel manager let events_4 = nodes[0].node.get_and_clear_pending_events(); @@ -278,9 +290,13 @@ fn test_manager_serialize_deserialize_events() { // Make sure the channel is functioning as though the de/serialization never happened assert_eq!(nodes[0].node.list_channels().len(), 1); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); @@ -313,7 +329,7 @@ fn test_simple_manager_serialize_deserialize() { let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); @@ -353,9 +369,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let nodes_0_serialized = nodes[0].node.encode(); route_payment(&nodes[0], &[&nodes[3]], 1000000); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id()); // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/ // nodes[3]) @@ -422,20 +438,22 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); - { // Channel close should result in a commitment tx - let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(txn.len(), 1); - check_spends!(txn[0], funding_tx); - assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid()); - } - for monitor in node_0_monitors.drain(..) 
{ assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor), ChannelMonitorUpdateStatus::Completed); check_added_monitors!(nodes[0], 1); } nodes[0].node = &nodes_0_deserialized; + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager); + { // Channel close should result in a commitment tx + nodes[0].node.timer_tick_occurred(); + let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(txn.len(), 1); + check_spends!(txn[0], funding_tx); + assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid()); + } + check_added_monitors!(nodes[0], 1); // nodes[1] and nodes[2] have no lost state with nodes[0]... reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); @@ -443,9 +461,13 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { //... and we can even still claim the payment! claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage); - nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap(); - nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { + features: nodes[3].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish); let mut found_err = false; for msg_event in nodes[0].node.get_and_clear_pending_msg_events() { @@ -488,14 +510,18 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized); if reconnect_panicing { - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); @@ -543,8 +569,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { // after the warning message sent by B, we should not able to // use the channel, or reconnect with success 
to the channel. assert!(nodes[0].node.list_usable_channels().is_empty()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]); @@ -566,7 +596,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) }); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }); check_closed_broadcast!(nodes[1], false); } @@ -600,7 +630,8 @@ fn test_forwardable_regen() { // First send a payment to nodes[1] let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -613,7 +644,8 @@ fn test_forwardable_regen() { // Next send a payment which is forwarded by nodes[1] let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000); - nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0].node.send_payment_with_route(&route_2, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -627,8 +659,8 @@ fn test_forwardable_regen() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id()); let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[1], 
chan_id_2).encode(); @@ -691,18 +723,19 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a[0].pubkey == nodes[1].node.get_our_node_id() { + if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 2); // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_events.len(), 2); - let (node_1_msgs, mut send_events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &send_events); - let (node_2_msgs, _send_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &send_events); + let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut send_events); + let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut send_events); do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None); do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None); @@ -753,8 +786,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash)); assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash)); - nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false); - nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id()); + nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id()); // During deserialization, we should have closed one channel and broadcast its latest // commitment transaction. We should also still have the original PaymentClaimable event we @@ -765,6 +798,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); } if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); } + check_added_monitors(&nodes[3], 2); + } else { + check_added_monitors(&nodes[3], 1); } // On restart, we should also get a duplicate PaymentClaimed event as we persisted the @@ -779,9 +815,13 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { if !persist_both_monitors { // If one of the two channels is still live, reveal the payment preimage over it. 
- nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap(); + nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { + features: nodes[2].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]); - nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap(); + nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { + features: nodes[3].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]); nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]); @@ -847,11 +887,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); if use_intercept { - route.paths[0][1].short_channel_id = intercept_scid; + route.paths[0].hops[1].short_channel_id = intercept_scid; } let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV; - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap(); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); @@ -920,10 +961,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht }); } + nodes[1].node.timer_tick_occurred(); let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_commitment_tx.len(), 1); + check_added_monitors!(nodes[1], 1); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); if use_cs_commitment { @@ -936,7 +979,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht if claim_htlc { confirm_transaction(&nodes[1], &cs_commitment_tx[1]); } else { - connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1); + connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1); let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_timeout_tx.len(), 1); confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]); @@ -1035,10 +1078,13 @@ fn removed_payment_no_manager_persistence() { _ => panic!("Unexpected event"), } + nodes[1].node.test_process_background_events(); + check_added_monitors(&nodes[1], 1); + // Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is // now forgotten everywhere. The ChannelManager should have, as a side-effect of reload, // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set. 
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
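The hunks above repeat the same handful of call-site migrations throughout the test file. The sketch below collects them in one place; it is a minimal before/after illustration assuming the signatures exactly as they appear in this patch, and bindings such as node_a, their_node_id, their_features, route, payment_hash, and payment_secret are placeholders rather than identifiers from the test file.

	// peer_disconnected: the trailing `no_connection_possible` bool is gone.
	// Old: node_a.peer_disconnected(&their_node_id, false);
	node_a.peer_disconnected(&their_node_id);

	// peer_connected: msgs::Init gains a `networks` field and the call takes an
	// extra bool (the inbound flag in this version of the API; the hunks above
	// pass `true` on one side of each reconnect and `false` on the other).
	// Old: node_a.peer_connected(&their_node_id, &msgs::Init {
	//          features: their_features, remote_network_address: None }).unwrap();
	node_a.peer_connected(&their_node_id, &msgs::Init {
		features: their_features, networks: None, remote_network_address: None
	}, true).unwrap();

	// Payment sends: send_payment(&route, hash, &Some(secret), id) becomes
	// send_payment_with_route with the secret wrapped in RecipientOnionFields,
	// and per-path hops move behind Path::hops (route.paths[0].hops[1]).
	// Old: node_a.send_payment(&route, payment_hash, &Some(payment_secret),
	//          PaymentId(payment_hash.0)).unwrap();
	node_a.send_payment_with_route(&route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();

	// Related shape changes exercised above: BroadcastChannelAnnouncement now
	// carries update_msg as an Option (hence the added .unwrap()), and
	// CounterpartyForceClosed wraps the peer's message in UntrustedString:
	// ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }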