Add `networks` TLV to `Init`'s TLV stream
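
The `Init` message gains an optional `networks` TLV listing the chains a node is interested in, so every test call site that builds an `Init` by hand now also passes `networks: None`. Below is a minimal sketch of how a caller might populate the new field, assuming it is an `Option<Vec<ChainHash>>` as the updated struct literals suggest; the helper name is hypothetical and not part of this change:

```rust
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::Network;
use lightning::ln::features::InitFeatures;
use lightning::ln::msgs;

/// Sketch only: build an `Init` advertising interest in a single chain.
/// The tests in this diff leave the field unset with `networks: None`.
fn init_for_network(features: InitFeatures, network: Network) -> msgs::Init {
    msgs::Init {
        features,
        // New optional TLV: genesis hashes of the chains this node cares about.
        networks: Some(vec![ChainHash::using_genesis_block(network)]),
        remote_network_address: None,
    }
}
```

Passing `None`, as the tests below do, simply omits the TLV from the encoded stream.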
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index f7342671afea7e801632ee22bcf74e5da3ce8263..bf2fb1de1e30d4a9ff3bf1fa5498d8f37fbbf5f8 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -12,7 +12,7 @@
 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor::ChannelMonitor;
-use crate::chain::keysinterface::EntropySource;
+use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
@@ -61,9 +61,13 @@ fn test_funding_peer_disconnect() {
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert!(events_2.is_empty());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
@@ -197,9 +201,13 @@ fn test_no_txn_manager_serialize_deserialize() {
                get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -282,9 +290,13 @@ fn test_manager_serialize_deserialize_events() {
        // Make sure the channel is functioning as though the de/serialization never happened
        assert_eq!(nodes[0].node.list_channels().len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -449,9 +461,13 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        //... and we can even still claim the payment!
        claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
-       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
-       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init {
+               features: nodes[3].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
        let mut found_err = false;
        for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
@@ -500,8 +516,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
 
        if reconnect_panicing {
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
 
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
@@ -549,8 +569,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        // After the warning message sent by B, we should not be able to
        // use the channel, or successfully reconnect to the channel.
        assert!(nodes[0].node.list_usable_channels().is_empty());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
@@ -699,7 +723,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        assert_eq!(route.paths.len(), 2);
        route.paths.sort_by(|path_a, _| {
                // Sort the path so that the path through nodes[1] comes first
-               if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+               if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
                        core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
        });
 
@@ -774,6 +798,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
        if persist_both_monitors {
                if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+               check_added_monitors(&nodes[3], 2);
+       } else {
+               check_added_monitors(&nodes[3], 1);
        }
 
        // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
@@ -788,9 +815,13 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        if !persist_both_monitors {
                // If one of the two channels is still live, reveal the payment preimage over it.
 
-               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
-               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
+               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[3].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
 
                nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
@@ -856,7 +887,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        let (mut route, payment_hash, payment_preimage, payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
        if use_intercept {
-               route.paths[0][1].short_channel_id = intercept_scid;
+               route.paths[0].hops[1].short_channel_id = intercept_scid;
        }
        let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
        let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
@@ -948,7 +979,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
                if claim_htlc {
                        confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
                } else {
-                       connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+                       connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
                        let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                        assert_eq!(bs_htlc_timeout_tx.len(), 1);
                        confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
@@ -1047,6 +1078,9 @@ fn removed_payment_no_manager_persistence() {
                _ => panic!("Unexpected event"),
        }
 
+       nodes[1].node.test_process_background_events();
+       check_added_monitors(&nodes[1], 1);
+
        // Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
        // now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
        // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.