diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 223aa5dbac30ba64604992d6b272722ceb3ed1c0..4c32a0bd0700730f3046c2976cb1ab617e58d5ef 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -15,21 +15,19 @@ use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, Retry, RecipientOnionFields};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use crate::ln::msgs;
+use crate::ln::types::ChannelId;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::routing::router::{RouteParameters, PaymentParameters};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
-use crate::util::string::UntrustedString;
 
 use bitcoin::hash_types::BlockHash;
 
 use crate::prelude::*;
-use core::default::Default;
 use crate::sync::Mutex;
 
 use crate::ln::functional_test_utils::*;
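[Editor's sketch] The import hunk above pulls in `crate::ln::types::ChannelId`, which replaces the old `OutPoint::to_channel_id()` helper throughout the hunks below. As a minimal standalone sketch of the BOLT 2 v1 derivation that `ChannelId::v1_from_funding_outpoint` performs (the raw byte handling here is an illustrative assumption, not code from this diff):

    // XOR the big-endian funding output index into the last two bytes
    // of the 32-byte funding txid to form the v1 channel_id.
    fn v1_channel_id(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (output_index >> 8) as u8;
        id[31] ^= (output_index & 0xff) as u8;
        id
    }
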
@@ -203,7 +201,7 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized =
-               get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
+               get_monitor!(nodes[0], ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 })).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
@@ -414,7 +412,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        let mut nodes_0_read = &nodes_0_serialized[..];
-       if let Err(msgs::DecodeError::InvalidValue) =
+       if let Err(msgs::DecodeError::DangerousValue) =
                <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::default(),
                entropy_source: keys_manager,
@@ -448,7 +446,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        assert!(nodes_0_read.is_empty());
 
        for monitor in node_0_monitors.drain(..) {
-               assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+               let funding_outpoint = monitor.get_funding_txo().0;
+               assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor),
                        Ok(ChannelMonitorUpdateStatus::Completed));
                check_added_monitors!(nodes[0], 1);
        }
@@ -496,6 +495,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
 #[cfg(feature = "std")]
 fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
+       use crate::routing::router::{RouteParameters, PaymentParameters};
+       use crate::ln::channelmanager::Retry;
+       use crate::util::string::UntrustedString;
        // When we get a data_loss_protect proving we're behind, we immediately panic as the
        // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
        // panic message informs the user they should force-close without broadcasting, which is tested
@@ -626,9 +628,10 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
                        std::mem::forget(nodes);
                }
        } else {
+               let error_message = "Channel force-closed";
                assert!(!not_stale, "We only care about the stale case when not testing panicking");
 
-               nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
                {
@@ -640,8 +643,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
                        if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
                        } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
                                match action {
-                                       &ErrorAction::DisconnectPeer { ref msg } => {
-                                               assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+                                       &ErrorAction::SendErrorMessage { ref msg } => {
+                                               assert_eq!(&msg.data, "Channel force-closed");
                                        },
                                        _ => panic!("Unexpected event!"),
                                }
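[Editor's sketch] As the updated assertion above shows, the force-close APIs now take a caller-supplied reason that is relayed to the peer in an `error` message and surfaced as `ErrorAction::SendErrorMessage`. A hedged sketch of the new call shape, where `node`, `channel_id`, and `counterparty_node_id` stand in for the harness values used in the test:

    // The reason string travels to the counterparty in the error
    // message; the Result is Err if the channel is unknown.
    let reason = String::from("Channel force-closed");
    node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, reason)
        .expect("channel should exist");
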
@@ -821,15 +824,19 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        assert_eq!(send_events.len(), 2);
        let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut send_events);
        let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut send_events);
-       do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None, false);
-       do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None, false);
+       do_pass_along_path(PassAlongPathArgs::new(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, node_1_msgs)
+               .with_payment_secret(payment_secret)
+               .without_clearing_recipient_events());
+       do_pass_along_path(PassAlongPathArgs::new(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, node_2_msgs)
+               .with_payment_secret(payment_secret)
+               .without_clearing_recipient_events());
 
        // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
        // monitors and ChannelManager, for use later, if we don't want to persist both monitors.
        let mut original_monitor = test_utils::TestVecWriter(Vec::new());
        if !persist_both_monitors {
-               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
-                       if outpoint.to_channel_id() == chan_id_not_persisted {
+               for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if channel_id == chan_id_not_persisted {
                                assert!(original_monitor.0.is_empty());
                                nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
                        }
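[Editor's sketch] `do_pass_along_path` now takes a `PassAlongPathArgs` builder (see the two calls above) rather than a long positional argument list. A self-contained sketch of the same builder pattern, with illustrative field names that are not the harness's actual struct:

    struct PathArgs {
        amount_msat: u64,
        payment_secret: Option<[u8; 32]>,
        clear_recipient_events: bool,
    }

    impl PathArgs {
        // Required inputs go through the constructor...
        fn new(amount_msat: u64) -> Self {
            Self { amount_msat, payment_secret: None, clear_recipient_events: true }
        }
        // ...optional behavior is toggled by chainable methods.
        fn with_payment_secret(mut self, secret: [u8; 32]) -> Self {
            self.payment_secret = Some(secret);
            self
        }
        fn without_clearing_recipient_events(mut self) -> Self {
            self.clear_recipient_events = false;
            self
        }
    }

Required data goes through `new`; optional behavior chains fluently, which keeps call sites readable as more knobs are added.
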
@@ -848,16 +855,16 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
        // with the old ChannelManager.
        let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
-       for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
-               if outpoint.to_channel_id() == chan_id_persisted {
+       for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+               if channel_id == chan_id_persisted {
                        assert!(updated_monitor.0.is_empty());
                        nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
                }
        }
        // If `persist_both_monitors` is set, get the second monitor here as well
        if persist_both_monitors {
-               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
-                       if outpoint.to_channel_id() == chan_id_not_persisted {
+               for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if channel_id == chan_id_not_persisted {
                                assert!(original_monitor.0.is_empty());
                                nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
                        }
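[Editor's sketch] `list_monitors()` now yields `(OutPoint, ChannelId)` pairs, so both loops above can match on the channel ID directly instead of re-deriving it from the funding outpoint. A hedged sketch of that lookup idiom, where `chain_monitor` and `target_channel_id` are assumptions standing in for the test's values:

    // Serialize only the monitor whose channel matches the target id.
    for (outpoint, channel_id) in chain_monitor.list_monitors() {
        if channel_id == target_channel_id {
            let monitor_bytes = chain_monitor.get_monitor(outpoint).unwrap().encode();
            // ... persist monitor_bytes for the later reload ...
        }
    }
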
@@ -1019,8 +1026,9 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 
        let _ = nodes[2].node.get_and_clear_pending_msg_events();
+       let error_message = "Channel force-closed";
 
-       nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
 
@@ -1065,9 +1073,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
                        confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
                } else {
                        connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
-                       let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-                       assert_eq!(bs_htlc_timeout_tx.len(), 1);
-                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+                       let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+                       let bs_htlc_timeout_tx = txn.pop().unwrap();
+                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx);
                }
        } else {
                confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
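[Editor's sketch] The HTLC-timeout handling above now tolerates connect styles that update the best block before connecting transaction data: under those styles an extra commitment rebroadcast may precede the HTLC-timeout broadcast, so the test pops the last broadcast entry instead of asserting exactly one. A minimal sketch of that defensive pattern, with `broadcaster` as an assumption for the test broadcaster handle:

    // The HTLC-timeout transaction is the most recently broadcast
    // entry whether or not a commitment rebroadcast came first.
    let mut txn = broadcaster.txn_broadcast();
    assert!(!txn.is_empty());
    let htlc_timeout_tx = txn.pop().unwrap();
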
@@ -1218,7 +1227,7 @@ fn test_reload_partial_funding_batch() {
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
        // Reload the node while a subset of the channels in the funding batch have persisted monitors.
-       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
        let node_encoded = nodes[0].node.encode();
        let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
        reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);