Merge pull request #3125 from valentinewallace/2024-06-async-payments-prefactor
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index ee6a6afb6ecda2ae2a919a610b9ba9e978d5e5e9..802ee1e563bf971b441604a830d1fee10f4015ec 100644
@@ -14,9 +14,11 @@ use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::ChannelMonitorUpdateStatus;
 use crate::chain::transaction::OutPoint;
 use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
-use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry, ChannelShutdownState, ChannelDetails};
+use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState};
+use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry};
 use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
-use crate::ln::{ChannelId, msgs};
+use crate::ln::msgs;
+use crate::ln::types::ChannelId;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
 use crate::ln::script::ShutdownScript;
@@ -25,18 +27,15 @@ use crate::util::test_utils::OnGetShutdownScriptpubkey;
 use crate::util::errors::APIError;
 use crate::util::config::UserConfig;
 use crate::util::string::UntrustedString;
+use crate::prelude::*;
 
-use bitcoin::{Transaction, TxOut};
+use bitcoin::{Transaction, TxOut, WitnessProgram, WitnessVersion};
+use bitcoin::amount::Amount;
 use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
-use bitcoin::network::constants::Network;
-use bitcoin::address::{WitnessProgram, WitnessVersion};
-
-use regex;
-
-use core::default::Default;
-use std::convert::TryFrom;
+use bitcoin::network::Network;
+use bitcoin::transaction::Version;
 
 use crate::ln::functional_test_utils::*;
 
@@ -292,7 +291,7 @@ fn close_on_unfunded_channel() {
        let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000);
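+       // The channel was never funded, so there was no commitment transaction to broadcast and
+       // `broadcasted_latest_txn` is `Some(false)`.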
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 1_000_000);
 }
 
 #[test]
@@ -303,11 +302,12 @@ fn expect_channel_shutdown_state_with_force_closure() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
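+       // `force_close_broadcasting_latest_txn` takes a human-readable message that is sent to the
+       // peer in the error message when force-closing.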
+       let error_message = "Channel force-closed";
 
        expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
        expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
 
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 
@@ -324,7 +324,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
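+       // Node B broadcast its latest commitment transaction when force-closing above, so its
+       // `HolderForceClosed` reason carries `broadcasted_latest_txn: Some(true)`.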
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -437,7 +437,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
        let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
        let route_params = if blinded_recipient {
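+               // The two new arguments (1, 100000000) are presumably the introduction node's
+               // minimum and maximum HTLC amounts in msat for the blinded path.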
                crate::ln::blinded_payment_tests::get_blinded_route_parameters(
-                       amt_msat, our_payment_secret,
+                       amt_msat, our_payment_secret, 1, 100000000,
                        nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_2.0.contents],
                        &chanmon_cfgs[2].keys_manager)
        } else {
@@ -1172,17 +1172,17 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
        assert_eq!(txn[0].output.len(), 2);
 
        if timeout_step != TimeoutStep::NoTimeout {
-               assert!((txn[0].output[0].script_pubkey.is_v0_p2wpkh() &&
-                        txn[0].output[1].script_pubkey.is_v0_p2wsh()) ||
-                       (txn[0].output[1].script_pubkey.is_v0_p2wpkh() &&
-                        txn[0].output[0].script_pubkey.is_v0_p2wsh()));
+               assert!((txn[0].output[0].script_pubkey.is_p2wpkh() &&
+                        txn[0].output[1].script_pubkey.is_p2wsh()) ||
+                       (txn[0].output[1].script_pubkey.is_p2wpkh() &&
+                        txn[0].output[0].script_pubkey.is_p2wsh()));
                check_closed_broadcast!(nodes[1], true);
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string() }
                        , [nodes[0].node.get_our_node_id()], 100000);
        } else {
-               assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
-               assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
+               assert!(txn[0].output[0].script_pubkey.is_p2wpkh());
+               assert!(txn[0].output[1].script_pubkey.is_p2wpkh());
 
                let events = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -1405,32 +1405,118 @@ fn batch_funding_failure() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
-       exchange_open_accept_chan(&nodes[0], &nodes[2], 1_000_000, 0);
+       let temp_chan_id_a = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+       let temp_chan_id_b = exchange_open_accept_chan(&nodes[0], &nodes[2], 1_000_000, 0);
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        // Build a transaction which only has the output for one of the two channels we're trying to
        // confirm. Previously this led to a deadlock in channel closure handling.
-       let mut tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
+       let mut tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
        let mut chans = Vec::new();
        for (idx, ev) in events.iter().enumerate() {
                if let Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, output_script, .. } = ev {
                        if idx == 0 {
-                               tx.output.push(TxOut { value: 1_000_000, script_pubkey: output_script.clone() });
+                               tx.output.push(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: output_script.clone() });
                        }
                        chans.push((temporary_channel_id, counterparty_node_id));
                } else { panic!(); }
        }
 
-       // We should probably end up with an error for both channels, but currently we don't generate
-       // an error for the failing channel itself.
        let err = "Error in transaction funding: Misuse error: No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
-       let close = [ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_txid(tx.txid().as_ref(), 0), true, ClosureReason::ProcessingError { err })];
+       let temp_err = "No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
+       let post_funding_chan_id_a = ChannelId::v1_from_funding_txid(tx.txid().as_ref(), 0);
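+       // Channel A's funding transaction was created, so it closes under its funded channel ID
+       // (derived from the funding txid), while channel B is still known by its temporary ID.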
+       let close = [
+               ExpectedCloseEvent::from_id_reason(post_funding_chan_id_a, true, ClosureReason::ProcessingError { err: err.clone() }),
+               ExpectedCloseEvent::from_id_reason(temp_chan_id_b, false, ClosureReason::ProcessingError { err: temp_err }),
+       ];
 
        nodes[0].node.batch_funding_transaction_generated(&chans, tx).unwrap_err();
 
-       get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       let msgs = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msgs.len(), 3);
+       // We currently spuriously send `FundingCreated` for the first channel and then immediately
+       // fail both channels, which isn't ideal but should be fine.
+       assert!(msgs.iter().any(|msg| {
+               if let MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage {
+                       msg: msgs::ErrorMessage { channel_id, .. }, ..
+               }, .. } = msg {
+                       *channel_id == temp_chan_id_b
+               } else { false }
+       }));
+       let funding_created_pos = msgs.iter().position(|msg| {
+               if let MessageSendEvent::SendFundingCreated { msg: msgs::FundingCreated { temporary_channel_id, .. }, .. } = msg {
+                       assert_eq!(*temporary_channel_id, temp_chan_id_a);
+                       true
+               } else { false }
+       }).unwrap();
+       let funded_channel_close_pos = msgs.iter().position(|msg| {
+               if let MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage {
+                       msg: msgs::ErrorMessage { channel_id, .. }, ..
+               }, .. } = msg {
+                       *channel_id == post_funding_chan_id_a
+               } else { false }
+       }).unwrap();
+
+       // The error message references the funded channel_id, so it must come after the
+       // funding_created message.
+       assert!(funded_channel_close_pos > funding_created_pos);
+
        check_closed_events(&nodes[0], &close);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
 }
+
+#[test]
+fn test_force_closure_on_low_stale_fee() {
+       // Check that we force-close channels when the feerate our peer set on a channel is too low
+       // and has gone stale, i.e. the peer hasn't sent us a feerate update in a long time.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+       // Start by connecting lots of blocks to give LDK some feerate history
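+       // (FEERATE_TRACKING_BLOCKS is the window of recent blocks over which LDK tracks the
+       // minimum feerate estimate when judging whether a peer's feerate is too low.)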
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS * 2 {
+               connect_blocks(&nodes[1], 1);
+       }
+
+       // Now connect a handful of blocks with a "high" feerate
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock *= 2;
+       }
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+               connect_blocks(&nodes[1], 1);
+       }
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Note that while one more high-feerate block would normally cause us to force-close, it
+       // won't here because we drop our feerate estimate back down first.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock /= 2;
+       }
+       connect_blocks(&nodes[1], super::channelmanager::FEERATE_TRACKING_BLOCKS as u32 * 2);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Now connect another FEERATE_TRACKING_BLOCKS - 1 blocks at a high feerate. None of these
+       // will cause a force-closure because LDK only looks at the minimum feerate over the last
+       // FEERATE_TRACKING_BLOCKS blocks.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock *= 2;
+       }
+
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+               connect_blocks(&nodes[1], 1);
+       }
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Finally, connect one more block and check the force-close happened.
+       connect_blocks(&nodes[1], 1);
+       check_added_monitors!(nodes[1], 1);
+       check_closed_broadcast(&nodes[1], 1, true);
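+       // The test fee estimator starts at 253 sat/kW, so the peer's stale feerate is 253 while
+       // our doubled estimate requires 253 * 2.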
+       let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2 };
+       check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
+}