Rename EnforcingSigner to TestChannelSigner
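
EnforcingSigner moves from util::enforcing_trait_impls to util::test_channel_signer and is renamed TestChannelSigner; the chain::keysinterface and util::events imports used in this file now resolve through the sign and events modules. In the tests below, the rename only shows up as the signer type parameter when re-reading serialized monitors. A minimal sketch of that pattern, assuming serialized_monitor and keys_manager stand in for locals the tests already hold (nothing here beyond the rename is introduced by this patch):

    use bitcoin::hash_types::BlockHash;
    use crate::chain::channelmonitor::ChannelMonitor;
    use crate::util::ser::ReadableArgs;
    use crate::util::test_channel_signer::TestChannelSigner;

    // Deserialize a previously-encoded monitor; the type parameter is now
    // TestChannelSigner where it used to be EnforcingSigner.
    let mut read = &serialized_monitor[..];
    let (_, _monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
        &mut read, (keys_manager, keys_manager)).unwrap();
    assert!(read.is_empty());
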
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 427c4001b6d83b36eefbcb7bf3067ccd65fd4b64..eda517e087eb56a12e23ed68be2d6efbf43d1f45 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor::ChannelMonitor;
-use crate::chain::keysinterface::EntropySource;
+use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId};
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
+use crate::util::string::UntrustedString;
 
 use bitcoin::hash_types::BlockHash;
 
@@ -37,32 +38,39 @@ fn test_funding_peer_disconnect() {
        // Test that we can lock in our funding tx while disconnected
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
+
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let nodes_0_deserialized;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        confirm_transaction(&nodes[0], &tx);
        let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
        assert!(events_1.is_empty());
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.send_channel_ready.1 = true;
+       reconnect_nodes(reconnect_args);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        confirm_transaction(&nodes[1], &tx);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert!(events_2.is_empty());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
@@ -169,36 +177,41 @@ fn test_funding_peer_disconnect() {
 
        // Check that after deserialization and reconnection we can still generate an identical
        // channel_announcement from the cached signatures.
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 
        reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 }
 
 #[test]
 fn test_no_txn_manager_serialize_deserialize() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
+
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let nodes_0_deserialized;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized =
                get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -222,10 +235,11 @@ fn test_manager_serialize_deserialize_events() {
        // This test makes sure the events field in ChannelManager survives de/serialization
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
+
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let nodes_0_deserialized;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // Start creating a channel, but stop right before broadcasting the funding transaction
@@ -260,6 +274,9 @@ fn test_manager_serialize_deserialize_events() {
        }
        // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
 
+       expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id());
+       expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id());
+
        nodes.push(node_a);
        nodes.push(node_b);
 
@@ -267,7 +284,7 @@ fn test_manager_serialize_deserialize_events() {
        let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // After deserializing, make sure the funding_transaction is still held by the channel manager
        let events_4 = nodes[0].node.get_and_clear_pending_events();
@@ -278,9 +295,13 @@ fn test_manager_serialize_deserialize_events() {
        // Make sure the channel is functioning as though the de/serialization never happened
        assert_eq!(nodes[0].node.list_channels().len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -303,22 +324,23 @@ fn test_manager_serialize_deserialize_events() {
 fn test_simple_manager_serialize_deserialize() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
+
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let nodes_0_deserialized;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
        claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@ -329,13 +351,15 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
        let chanmon_cfgs = create_chanmon_cfgs(4);
        let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let logger;
+       let fee_estimator;
+       let persister;
+       let new_chain_monitor;
+
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
-       let logger: test_utils::TestLogger;
-       let fee_estimator: test_utils::TestFeeEstimator;
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let nodes_0_deserialized;
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
        let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 2, 0).2;
        let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3);
@@ -353,9 +377,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let nodes_0_serialized = nodes[0].node.encode();
 
        route_payment(&nodes[0], &[&nodes[3]], 1000000);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
        // nodes[3])
@@ -368,14 +392,14 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        let keys_manager = &chanmon_cfgs[0].keys_manager;
-       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
        nodes[0].chain_monitor = &new_chain_monitor;
 
 
        let mut node_0_stale_monitors = Vec::new();
        for serialized in node_0_stale_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_stale_monitors.push(monitor);
        }
@@ -383,7 +407,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_monitors = Vec::new();
        for serialized in node_0_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_monitors.push(monitor);
        }
@@ -422,30 +446,36 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       { // Channel close should result in a commitment tx
-               let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(txn.len(), 1);
-               check_spends!(txn[0], funding_tx);
-               assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
-       }
-
        for monitor in node_0_monitors.drain(..) {
                assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
                        ChannelMonitorUpdateStatus::Completed);
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
-       check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+
+       check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[3].node.get_our_node_id()], 100000);
+       { // Channel close should result in a commitment tx
+               nodes[0].node.timer_tick_occurred();
+               let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(txn.len(), 1);
+               check_spends!(txn[0], funding_tx);
+               assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+       }
+       check_added_monitors!(nodes[0], 1);
 
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-       reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
        //... and we can even still claim the payment!
        claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
-       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
-       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init {
+               features: nodes[3].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
        let mut found_err = false;
        for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
@@ -472,11 +502,13 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
        // during signing due to revoked tx
        chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let persister;
        let new_chain_monitor;
-       let nodes_0_deserialized;
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes_0_deserialized;
+
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
@@ -488,14 +520,18 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
 
        if reconnect_panicing {
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
 
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
@@ -520,7 +556,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 
        nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
        {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 0);
@@ -543,8 +579,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        // after the warning message sent by B, we should not be able to
        // use the channel, or successfully reconnect to the channel.
        assert!(nodes[0].node.list_usable_channels().is_empty());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
@@ -566,7 +606,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
        assert!(nodes[1].node.list_usable_channels().is_empty());
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
+               , [nodes[0].node.get_our_node_id()], 1000000);
        check_closed_broadcast!(nodes[1], false);
 }
 
@@ -590,17 +631,18 @@ fn test_forwardable_regen() {
 
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let nodes_1_deserialized;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
        // First send a payment to nodes[1]
        let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+       nodes[0].node.send_payment_with_route(&route, payment_hash,
+               RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
 
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -613,7 +655,8 @@ fn test_forwardable_regen() {
 
        // Next send a payment which is forwarded by nodes[1]
        let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
-       nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+       nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
+               RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
 
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -627,17 +670,19 @@ fn test_forwardable_regen() {
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
        let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
        reload_node!(nodes[1], nodes[1].node.encode(), &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
        // Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
        // the commitment state.
-       reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+       reconnect_args.send_channel_ready = (true, true);
+       reconnect_nodes(reconnect_args);
 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -673,11 +718,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        // definitely claimed.
        let chanmon_cfgs = create_chanmon_cfgs(4);
        let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let persister;
+       let new_chain_monitor;
 
-       let persister: test_utils::TestPersister;
-       let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_3_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes_3_deserialized;
 
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
@@ -691,18 +736,19 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        assert_eq!(route.paths.len(), 2);
        route.paths.sort_by(|path_a, _| {
                // Sort the path so that the path through nodes[1] comes first
-               if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+               if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
                        core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
        });
 
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+       nodes[0].node.send_payment_with_route(&route, payment_hash,
+               RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 2);
 
        // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
        let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(send_events.len(), 2);
-       let (node_1_msgs, mut send_events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &send_events);
-       let (node_2_msgs, _send_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &send_events);
+       let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut send_events);
+       let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut send_events);
        do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None);
        do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None);
 
@@ -753,8 +799,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
        assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
 
-       nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
 
        // During deserialization, we should have closed one channel and broadcast its latest
        // commitment transaction. We should also still have the original PaymentClaimable event we
@@ -765,6 +811,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
        if persist_both_monitors {
                if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+               check_added_monitors(&nodes[3], 2);
+       } else {
+               check_added_monitors(&nodes[3], 1);
        }
 
        // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
@@ -779,9 +828,13 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        if !persist_both_monitors {
                // If one of the two channels is still live, reveal the payment preimage over it.
 
-               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
-               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[3].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
 
                nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
@@ -829,12 +882,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        // This was never an issue, but it may be easy to regress here going forward.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
+
        let mut intercept_forwards_config = test_default_channel_config();
        intercept_forwards_config.accept_intercept_htlcs = true;
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
-
-       let persister;
-       let new_chain_monitor;
        let nodes_1_deserialized;
 
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
@@ -847,11 +900,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        let (mut route, payment_hash, payment_preimage, payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
        if use_intercept {
-               route.paths[0][1].short_channel_id = intercept_scid;
+               route.paths[0].hops[1].short_channel_id = intercept_scid;
        }
        let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
        let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+       nodes[0].node.send_payment_with_route(&route, payment_hash,
+               RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap();
        check_added_monitors!(nodes[0], 1);
 
        let payment_event = SendEvent::from_node(&nodes[0]);
@@ -899,7 +953,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
 
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        check_closed_broadcast!(nodes[2], true);
 
        let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
@@ -908,7 +962,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 
        // Note that this checks that this is the only event on nodes[1], implying the
        // `HTLCIntercepted` event has been removed in the `use_intercept` case.
-       check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+       check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager, [nodes[2].node.get_our_node_id()], 100000);
 
        if use_intercept {
                // Attempt to forward the HTLC back out over nodes[1]' still-open channel, ensuring we get
@@ -920,11 +974,13 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
                });
        }
 
+       nodes[1].node.timer_tick_occurred();
        let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(bs_commitment_tx.len(), 1);
+       check_added_monitors!(nodes[1], 1);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        if use_cs_commitment {
                // If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
@@ -936,7 +992,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
                if claim_htlc {
                        confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
                } else {
-                       connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+                       connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
                        let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                        assert_eq!(bs_htlc_timeout_tx.len(), 1);
                        confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
@@ -996,10 +1052,10 @@ fn removed_payment_no_manager_persistence() {
        // were left dangling when a channel was force-closed due to a stale ChannelManager.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
-
        let persister;
        let new_chain_monitor;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes_1_deserialized;
 
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
@@ -1035,11 +1091,14 @@ fn removed_payment_no_manager_persistence() {
                _ => panic!("Unexpected event"),
        }
 
+       nodes[1].node.test_process_background_events();
+       check_added_monitors(&nodes[1], 1);
+
        // Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
        // now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
        // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
        check_added_monitors!(nodes[1], 1);