X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Freload_tests.rs;h=b53c617916a16deb8efc52b0197dacd61f5af780;hb=d7e3320c030654ca3ff2b6ffd83ae65dfbe5e3c8;hp=ffb6ba0e85a1340da52bc88d28168e05f0f5dfe1;hpb=a9534fe6b53ee5a35bcedb897f03804dc17f2d39;p=rust-lightning diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index ffb6ba0e..b53c6179 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -12,10 +12,10 @@ use crate::chain::{ChannelMonitorUpdateStatus, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::ChannelMonitor; -use crate::chain::keysinterface::EntropySource; +use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; -use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId}; +use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use crate::util::enforcing_trait_impls::EnforcingSigner; @@ -61,9 +61,13 @@ fn test_funding_peer_disconnect() { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert!(events_2.is_empty()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect. 
@@ -197,9 +201,13 @@ fn test_no_txn_manager_serialize_deserialize() { get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode(); reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); @@ -261,6 +269,9 @@ fn test_manager_serialize_deserialize_events() { } // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead + expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id()); + expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id()); + nodes.push(node_a); nodes.push(node_b); @@ -279,9 +290,13 @@ fn test_manager_serialize_deserialize_events() { // Make sure the channel is functioning as though the de/serialization never happened assert_eq!(nodes[0].node.list_channels().len(), 1); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); @@ -369,7 +384,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[0].keys_manager; - new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager); + new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager); nodes[0].chain_monitor = &new_chain_monitor; @@ -446,9 +461,13 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { //... and we can even still claim the payment! 
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage); - nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap(); - nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { + features: nodes[3].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish); let mut found_err = false; for msg_event in nodes[0].node.get_and_clear_pending_msg_events() { @@ -497,8 +516,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized); if reconnect_panicing { - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); @@ -546,8 +569,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { // after the warning message sent by B, we should not able to // use the channel, or reconnect with success to the channel. 
assert!(nodes[0].node.list_usable_channels().is_empty()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]); @@ -603,7 +630,8 @@ fn test_forwardable_regen() { // First send a payment to nodes[1] let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -616,7 +644,8 @@ fn test_forwardable_regen() { // Next send a payment which is forwarded by nodes[1] let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000); - nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0].node.send_payment_with_route(&route_2, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -694,11 +723,12 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a[0].pubkey == nodes[1].node.get_our_node_id() { + if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 2); // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event @@ -768,6 +798,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); } if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. 
} = events[2] { } else { panic!(); } + check_added_monitors(&nodes[3], 2); + } else { + check_added_monitors(&nodes[3], 1); } // On restart, we should also get a duplicate PaymentClaimed event as we persisted the @@ -782,9 +815,13 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { if !persist_both_monitors { // If one of the two channels is still live, reveal the payment preimage over it. - nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { + features: nodes[2].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]); - nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { + features: nodes[3].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]); nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]); @@ -850,11 +887,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); if use_intercept { - route.paths[0][1].short_channel_id = intercept_scid; + route.paths[0].hops[1].short_channel_id = intercept_scid; } let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV; - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap(); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); @@ -941,7 +979,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht if claim_htlc { confirm_transaction(&nodes[1], &cs_commitment_tx[1]); } else { - connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1); + connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1); let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_timeout_tx.len(), 1); confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]); @@ -1040,6 +1078,9 @@ fn removed_payment_no_manager_persistence() { _ => panic!("Unexpected event"), } + nodes[1].node.test_process_background_events(); + check_added_monitors(&nodes[1], 1); + // Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is // now forgotten everywhere. The ChannelManager should have, as a side-effect of reload, // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
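
For reference, a minimal sketch (not part of the patch) of the calling convention these hunks migrate the tests to: payments now go through send_payment_with_route with an explicit RecipientOnionFields instead of send_payment with a bare Option<PaymentSecret>, and msgs::Init construction gains a networks field, both visible verbatim above. The sketch assumes the functional_test_utils helpers already imported by reload_tests.rs; the test name is illustrative only, not part of the file.

	#[test]
	fn send_payment_with_route_sketch() {
		// Standard two-node harness from functional_test_utils.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		create_announced_chan_between_nodes(&nodes, 0, 1);

		// The macro returns the route plus hash/preimage/secret, as used throughout this file.
		let (route, payment_hash, payment_preimage, payment_secret) =
			get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);

		// New convention: the payment secret is wrapped in RecipientOnionFields.
		nodes[0].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		// Deliver the HTLC and settle it with the usual helpers.
		pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, payment_hash, payment_secret);
		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
	}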