X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Freload_tests.rs;h=67a48012789c1e30d6e56e519984f21eefb84eee;hb=ec3aa494953c7d720370119f673e8e6b3a2155d5;hp=6643d34ca17005e8bdeb62a6a52a27f287245903;hpb=be6f263825e0c75d32d6d48fd5dff9986ca6b011;p=rust-lightning

diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 6643d34c..67a48012 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -14,15 +14,16 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor::ChannelMonitor;
 use crate::chain::keysinterface::EntropySource;
 use crate::chain::transaction::OutPoint;
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId};
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
+use crate::util::string::UntrustedString;
 
 use bitcoin::hash_types::BlockHash;
 
@@ -60,9 +61,9 @@ fn test_funding_peer_disconnect() {
 	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert!(events_2.is_empty());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
 	// nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
@@ -196,9 +197,9 @@ fn test_no_txn_manager_serialize_deserialize() {
 		get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
 	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -260,6 +261,9 @@ fn test_manager_serialize_deserialize_events() {
 	}
 
 	// Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
+	expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id());
+	expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id());
+
 	nodes.push(node_a);
 	nodes.push(node_b);
 
@@ -278,9 +282,9 @@ fn test_manager_serialize_deserialize_events() {
 	// Make sure the channel is functioning as though the de/serialization never happened
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -422,20 +426,22 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	nodes_0_deserialized = nodes_0_deserialized_tmp;
 	assert!(nodes_0_read.is_empty());
 
-	{ // Channel close should result in a commitment tx
-		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(txn.len(), 1);
-		check_spends!(txn[0], funding_tx);
-		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
-	}
-
 	for monitor in node_0_monitors.drain(..) {
 		assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
 			ChannelMonitorUpdateStatus::Completed);
 		check_added_monitors!(nodes[0], 1);
 	}
 
 	nodes[0].node = &nodes_0_deserialized;
+	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	{ // Channel close should result in a commitment tx
+		nodes[0].node.timer_tick_occurred();
+		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(txn.len(), 1);
+		check_spends!(txn[0], funding_tx);
+		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+	}
+	check_added_monitors!(nodes[0], 1);
 
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -443,9 +449,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	//... and we can even still claim the payment!
 	claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
-	nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
-	nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
 	nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
 	let mut found_err = false;
 	for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
@@ -494,8 +500,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 	reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
 
 	if reconnect_panicing {
-		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
@@ -543,8 +549,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 	// after the warning message sent by B, we should not able to
 	// use the channel, or reconnect with success to the channel.
 	assert!(nodes[0].node.list_usable_channels().is_empty());
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
 	let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
@@ -566,7 +572,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 	nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
 	assert!(nodes[1].node.list_usable_channels().is_empty());
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
 	check_closed_broadcast!(nodes[1], false);
 }
 
@@ -600,7 +606,8 @@ fn test_forwardable_regen() {
 
 	// First send a payment to nodes[1]
 	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -613,7 +620,8 @@ fn test_forwardable_regen() {
 
 	// Next send a payment which is forwarded by nodes[1]
 	let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
-	nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
+		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -691,11 +699,12 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	assert_eq!(route.paths.len(), 2);
 	route.paths.sort_by(|path_a, _| {
 		// Sort the path so that the path through nodes[1] comes first
-		if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+		if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
 			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
 	});
 
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 2);
 
 	// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
@@ -779,9 +788,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 
 	if !persist_both_monitors {
 		// If one of the two channels is still live, reveal the payment preimage over it.
-		nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
-		nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
 		let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
 
 		nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
@@ -847,11 +856,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 
 	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
 	if use_intercept {
-		route.paths[0][1].short_channel_id = intercept_scid;
+		route.paths[0].hops[1].short_channel_id = intercept_scid;
 	}
 	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
 	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let payment_event = SendEvent::from_node(&nodes[0]);
@@ -920,8 +930,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 		});
 	}
 
+	nodes[1].node.timer_tick_occurred();
 	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_commitment_tx.len(), 1);
+	check_added_monitors!(nodes[1], 1);
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -936,7 +948,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	if claim_htlc {
 		confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
 	} else {
-		connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+		connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
 		let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(bs_htlc_timeout_tx.len(), 1);
 		confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
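
The recurring edits in this diff are mechanical migrations to newer LDK APIs: peer_connected takes an added bool for whether the connection is inbound, send_payment is replaced by send_payment_with_route with the payment secret wrapped in RecipientOnionFields, path hops move from route.paths[i][j] to route.paths[i].hops[j], and ClosureReason::CounterpartyForceClosed carries peer_msg as an UntrustedString. A minimal sketch of the migrated send-payment shape, assuming the functional-test harness (nodes, get_route_and_payment_hash!) used throughout this file is in scope:

	// Sketch only; mirrors the migrated call sites in the hunks above.
	let (route, payment_hash, _payment_preimage, payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
	// The payment secret now travels inside RecipientOnionFields rather than
	// as a bare &Option<PaymentSecret> argument.
	nodes[0].node.send_payment_with_route(&route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();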