Merge pull request #3125 from valentinewallace/2024-06-async-payments-prefactor
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 12421ce9c3f6323e31747173d4064572f5ae1198..802ee1e563bf971b441604a830d1fee10f4015ec 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -14,7 +14,8 @@ use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::ChannelMonitorUpdateStatus;
 use crate::chain::transaction::OutPoint;
 use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
-use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry, ChannelShutdownState, ChannelDetails};
+use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState};
+use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry};
 use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
 use crate::ln::msgs;
 use crate::ln::types::ChannelId;
@@ -28,12 +29,13 @@ use crate::util::config::UserConfig;
 use crate::util::string::UntrustedString;
 use crate::prelude::*;
 
-use bitcoin::{Transaction, TxOut};
+use bitcoin::{Transaction, TxOut, WitnessProgram, WitnessVersion};
+use bitcoin::amount::Amount;
 use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
-use bitcoin::network::constants::Network;
-use bitcoin::address::{WitnessProgram, WitnessVersion};
+use bitcoin::network::Network;
+use bitcoin::transaction::Version;
 
 use crate::ln::functional_test_utils::*;
 
@@ -289,7 +291,7 @@ fn close_on_unfunded_channel() {
        let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
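+       // The channel was never funded, so this holder-initiated closure cannot have broadcast a
+       // commitment transaction, which the event records as `broadcasted_latest_txn: Some(false)`.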
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 1_000_000);
 }
 
 #[test]
@@ -300,11 +302,12 @@ fn expect_channel_shutdown_state_with_force_closure() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
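+       // The reason string passed to `force_close_broadcasting_latest_txn` below is sent to the
+       // peer in the error message accompanying the force-closure.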
+       let error_message = "Channel force-closed";
 
        expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
        expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
 
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 
@@ -321,7 +324,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1169,17 +1172,17 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
        assert_eq!(txn[0].output.len(), 2);
 
        if timeout_step != TimeoutStep::NoTimeout {
-               assert!((txn[0].output[0].script_pubkey.is_v0_p2wpkh() &&
-                        txn[0].output[1].script_pubkey.is_v0_p2wsh()) ||
-                       (txn[0].output[1].script_pubkey.is_v0_p2wpkh() &&
-                        txn[0].output[0].script_pubkey.is_v0_p2wsh()));
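+               // On a closing_signed negotiation timeout the channel is force-closed, so txn[0] is a
+               // commitment transaction: its to_remote output is P2WPKH and its to_local output is P2WSH.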
+               assert!((txn[0].output[0].script_pubkey.is_p2wpkh() &&
+                        txn[0].output[1].script_pubkey.is_p2wsh()) ||
+                       (txn[0].output[1].script_pubkey.is_p2wpkh() &&
+                        txn[0].output[0].script_pubkey.is_p2wsh()));
                check_closed_broadcast!(nodes[1], true);
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string() }
                        , [nodes[0].node.get_our_node_id()], 100000);
        } else {
-               assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
-               assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
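+               // Without a timeout the close completes cooperatively; each party is paid directly to
+               // its P2WPKH shutdown script.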
+               assert!(txn[0].output[0].script_pubkey.is_p2wpkh());
+               assert!(txn[0].output[1].script_pubkey.is_p2wpkh());
 
                let events = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -1409,12 +1412,12 @@ fn batch_funding_failure() {
        assert_eq!(events.len(), 2);
        // Build a transaction which only has the output for one of the two channels we're trying to
        // confirm. Previously this led to a deadlock in channel closure handling.
-       let mut tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
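+       // `Version::TWO` and `Amount::from_sat` (below) are rust-bitcoin's typed wrappers for the
+       // raw transaction version and satoshi amount.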
+       let mut tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
        let mut chans = Vec::new();
        for (idx, ev) in events.iter().enumerate() {
                if let Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, output_script, .. } = ev {
                        if idx == 0 {
-                               tx.output.push(TxOut { value: 1_000_000, script_pubkey: output_script.clone() });
+                               tx.output.push(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: output_script.clone() });
                        }
                        chans.push((temporary_channel_id, counterparty_node_id));
                } else { panic!(); }
@@ -1461,3 +1464,59 @@ fn batch_funding_failure() {
        check_closed_events(&nodes[0], &close);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
 }
+
+#[test]
+fn test_force_closure_on_low_stale_fee() {
+       // Check that we force-close channels if their feerate is too low and has gone stale, i.e.
+       // the counterparty never sent an update_fee to raise it.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
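+       // LDK samples the fee estimator as each block is connected and force-closes a channel whose
+       // feerate falls too far below the minimum estimate over the last FEERATE_TRACKING_BLOCKS blocks.
+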
+       // Start by connecting lots of blocks to give LDK some feerate history
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS * 2 {
+               connect_blocks(&nodes[1], 1);
+       }
+
+       // Now connect a handful of blocks with a "high" feerate
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock *= 2;
+       }
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+               connect_blocks(&nodes[1], 1);
+       }
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Now, while one more block at the high feerate would have caused us to force-close, it
+       // won't, because we've dropped the feerate back down first.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock /= 2;
+       }
+       connect_blocks(&nodes[1], super::channelmanager::FEERATE_TRACKING_BLOCKS as u32 * 2);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Now connect another FEERATE_TRACKING_BLOCKS - 1 blocks at a high feerate. Note that none
+       // of these will cause a force-closure because LDK only looks at the minimum feerate over the
+       // last FEERATE_TRACKING_BLOCKS blocks.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock *= 2;
+       }
+
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+               connect_blocks(&nodes[1], 1);
+       }
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Finally, connect one more block and check the force-close happened.
+       connect_blocks(&nodes[1], 1);
+       check_added_monitors!(nodes[1], 1);
+       check_closed_broadcast(&nodes[1], 1, true);
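+       // 253 sat/kW is the test fee estimator's default feerate: the channel is still at that stale
+       // rate while the node's current estimate has been doubled to 506 sat/kW.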
+       let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2 };
+       check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
+}