Add constructor to `RouteParameters`
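The constructor itself presumably lands in `lightning/src/routing/router.rs`, where `RouteParameters` is defined; the hunks below only cover this test file. For orientation, a minimal sketch of how such a constructor would be used, assuming it pairs a `PaymentParameters` with the amount to send in msat (the name `from_payment_params_and_value` and the helper function below are assumptions for illustration, not shown in this diff):

    // Sketch only: constructor name and shape assumed, not taken from the hunks below.
    use bitcoin::secp256k1::PublicKey;
    use lightning::routing::router::{PaymentParameters, RouteParameters};

    fn build_route_params(payee: PublicKey, amount_msat: u64) -> RouteParameters {
        // Route to a single payee with a final CLTV expiry delta of 144 blocks.
        let payment_params = PaymentParameters::from_node_id(payee, 144);
        RouteParameters::from_payment_params_and_value(payment_params, amount_msat)
    }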
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index f5ba7166ca71b80c0732c0f758001b82f9aa371c..d3401e5f0681a73c77157f9756169fe4c9229158 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -17,9 +17,9 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{EcdsaChannelSigner, EntropySource};
+use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
-use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
@@ -30,8 +30,8 @@ use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
-use crate::util::test_utils;
+use crate::util::test_channel_signer::TestChannelSigner;
+use crate::util::test_utils::{self, WatchtowerPersister};
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::string::UntrustedString;
@@ -56,7 +56,7 @@ use alloc::collections::BTreeSet;
 use core::default::Default;
 use core::iter::repeat;
 use bitcoin::hashes::Hash;
-use crate::sync::{Arc, Mutex};
+use crate::sync::{Arc, Mutex, RwLock};
 
 use crate::ln::functional_test_utils::*;
 use crate::ln::chan_utils::CommitmentTransaction;
@@ -696,7 +696,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
@@ -1409,7 +1409,7 @@ fn test_fee_spike_violation_fails_htlc() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
@@ -1505,7 +1505,7 @@ fn test_fee_spike_violation_fails_htlc() {
                _ => panic!("Unexpected event"),
        };
        nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
-               format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
+               format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
 
        check_added_monitors!(nodes[1], 2);
 }
@@ -2554,6 +2554,71 @@ fn revoked_output_claim() {
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
+#[test]
+fn test_forming_justice_tx_from_monitor_updates() {
+       do_test_forming_justice_tx_from_monitor_updates(true);
+       do_test_forming_justice_tx_from_monitor_updates(false);
+}
+
+fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
+       // Simple test to make sure that the justice tx built by WatchtowerPersister
+       // is properly formed and can be broadcast/confirmed successfully in the event
+       // that a revoked commitment transaction is broadcast.
+       // (Similar to the `revoked_output_claim` test, but we get the justice tx and broadcast manually.)
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script().unwrap();
+       let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script().unwrap();
+       let persisters = vec![WatchtowerPersister::new(destination_script0),
+               WatchtowerPersister::new(destination_script1)];
+       let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+
+       if !broadcast_initial_commitment {
+               // Send a payment to move the channel forward
+               send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
+       }
+
+       // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output.
+       // We'll keep this commitment transaction to broadcast once it's revoked.
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
+       assert_eq!(revoked_local_txn.len(), 1);
+       let revoked_commitment_tx = &revoked_local_txn[0];
+
+       // Send another payment, now revoking the previous commitment tx
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
+
+       let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
+       check_spends!(justice_tx, revoked_commitment_tx);
+
+       mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
+       mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
+
+       check_added_monitors!(nodes[1], 1);
+       check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+               &[nodes[0].node.get_our_node_id()], 100_000);
+       get_announce_close_broadcast_events(&nodes, 1, 0);
+
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+               &[nodes[1].node.get_our_node_id()], 100_000);
+
+       // Check that the justice tx has sent the revoked output value to nodes[1]
+       let monitor = get_monitor!(nodes[1], channel_id);
+       let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
+               match balance {
+                       channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
+                       _ => panic!("Unexpected balance type"),
+               }
+       });
+       // On the first commitment, node[1]'s balance was below dust so it didn't have an output
+       let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
+       let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
+       assert_eq!(total_claimable_balance, expected_claimable_balance);
+}
+
 #[test]
 fn claim_htlc_outputs_shared_tx() {
        // Node revoked old state, htlcs haven't time out yet, claim them in shared justice tx
@@ -5368,7 +5434,7 @@ fn test_key_derivation_params() {
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
        let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
-       let scorer = Mutex::new(test_utils::TestScorer::new());
+       let scorer = RwLock::new(test_utils::TestScorer::new());
        let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
        let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -5769,7 +5835,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
        // us to surface its failure to the user.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5857,7 +5923,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        // to surface its failure to the user. The first payment should succeed.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -7573,7 +7639,7 @@ fn test_counterparty_raa_skip_no_crash() {
        // commitment transaction, we would have happily carried on and provided them the next
        // commitment transaction based on one RAA forward. This would probably eventually have led to
        // channel closure, but it would not have resulted in funds loss. Still, our
-       // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
+       // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
        // check simply that the channel is closed in response to such an RAA, but don't check whether
        // we decide to punish our counterparty for revoking their funds (as we don't currently
        // implement that).
@@ -7937,7 +8003,7 @@ fn test_can_not_accept_unknown_inbound_channel() {
        let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
        let nodes = create_network(2, &node_cfg, &node_chanmgr);
 
-       let unknown_channel_id = [0; 32];
+       let unknown_channel_id = ChannelId::new_zero();
        let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
        match api_res {
                Err(APIError::APIMisuseError { err }) => {
@@ -8284,7 +8350,7 @@ fn test_update_err_monitor_lockdown() {
        let watchtower = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8354,7 +8420,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_alice = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8385,7 +8451,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_bob = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8951,7 +9017,7 @@ fn test_error_chans_closed() {
 
        // A null channel ID should close all channels
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
-       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
                [nodes[1].node.get_our_node_id(); 2], 100000);