From 47cb45ed32c09f39418abd200cfc1f7d402964e4 Mon Sep 17 00:00:00 2001
From: henghonglee
Date: Wed, 28 Jun 2023 00:54:15 +0800
Subject: [PATCH] Add ChannelShutdownState to ChannelDetails

This commit adds the shutdown state of the channel to `ChannelDetails`.
---
 fuzz/src/router.rs                        |   1 +
 lightning/src/ln/channel.rs               |  37 ++++-
 lightning/src/ln/channelmanager.rs        |  37 +++++
 lightning/src/ln/functional_test_utils.rs |  10 ++
 lightning/src/ln/shutdown_tests.rs        | 165 +++++++++++++++++++++-
 lightning/src/routing/router.rs           |   4 +-
 6 files changed, 245 insertions(+), 9 deletions(-)

diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index 72935f153..31732257c 100644
--- a/fuzz/src/router.rs
+++ b/fuzz/src/router.rs
@@ -270,6 +270,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 				inbound_htlc_maximum_msat: None,
 				config: None,
 				feerate_sat_per_1000_weight: None,
+				channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
 			});
 		}
 		Some(&first_hops_vec[..])
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index db3858823..680757bae 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -27,7 +27,7 @@ use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -903,6 +903,34 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
 	}
 
+	/// Returns the stage the channel is currently at in its (possibly ongoing) shutdown.
+	pub fn shutdown_state(&self) -> ChannelShutdownState {
+		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
+			return ChannelShutdownState::ShutdownComplete;
+		}
+		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
+			return ChannelShutdownState::ShutdownInitiated;
+		}
+		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
+			return ChannelShutdownState::ResolvingHTLCs;
+		}
+		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
+			return ChannelShutdownState::NegotiatingClosingFee;
+		}
+		ChannelShutdownState::NotShuttingDown
+	}
+
+	fn closing_negotiation_ready(&self) -> bool {
+		self.pending_inbound_htlcs.is_empty() &&
+		self.pending_outbound_htlcs.is_empty() &&
+		self.pending_update_fee.is_none() &&
+		self.channel_state &
+			(BOTH_SIDES_SHUTDOWN_MASK |
+			 ChannelState::AwaitingRemoteRevoke as u32 |
+			 ChannelState::PeerDisconnected as u32 |
+			 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+	}
+
 	/// Returns true if this channel is currently available for use. This is a superset of
 	/// is_usable() and considers things like the channel being temporarily disabled.
 	/// Allowed in any state (including after shutdown)
@@ -3956,12 +3984,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// this point if we're the funder we should send the initial closing_signed, and in any case
 	/// shutdown should complete within a reasonable timeframe.
 	fn closing_negotiation_ready(&self) -> bool {
-		self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
-			self.context.channel_state &
-				(BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
-				ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
-				== BOTH_SIDES_SHUTDOWN_MASK &&
-			self.context.pending_update_fee.is_none()
+		self.context.closing_negotiation_ready()
 	}
 
 	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
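The classification in shutdown_state() above is deliberately order-dependent: ShutdownComplete wins outright, ShutdownInitiated covers the window where only our shutdown has gone out, and once both sides' shutdown messages are in flight the channel reports ResolvingHTLCs until closing_negotiation_ready() holds. A minimal standalone sketch of that precedence follows; the bit values, names, and main() harness are illustrative stand-ins, not LDK's actual ChannelState:

    // Standalone model of the precedence in ChannelContext::shutdown_state().
    // All constants and names here are hypothetical, for illustration only.
    const LOCAL_SHUTDOWN_SENT: u32 = 1 << 0;
    const REMOTE_SHUTDOWN_SENT: u32 = 1 << 1;
    const SHUTDOWN_COMPLETE: u32 = 1 << 2;
    const BOTH_SIDES_SHUTDOWN_MASK: u32 = LOCAL_SHUTDOWN_SENT | REMOTE_SHUTDOWN_SENT;

    #[derive(Debug, PartialEq)]
    enum Stage { NotShuttingDown, ShutdownInitiated, ResolvingHTLCs, NegotiatingClosingFee, ShutdownComplete }

    fn classify(state: u32, negotiation_ready: bool) -> Stage {
        // Checked first: a completed shutdown overrides everything else.
        if state & SHUTDOWN_COMPLETE != 0 { return Stage::ShutdownComplete; }
        // Only our shutdown has been sent so far.
        if state & LOCAL_SHUTDOWN_SENT != 0 && state & REMOTE_SHUTDOWN_SENT == 0 {
            return Stage::ShutdownInitiated;
        }
        // Shutdown underway on at least one side: blocked on HTLCs until
        // the closing-fee negotiation is ready to start.
        if state & BOTH_SIDES_SHUTDOWN_MASK != 0 && !negotiation_ready { return Stage::ResolvingHTLCs; }
        if state & BOTH_SIDES_SHUTDOWN_MASK != 0 { return Stage::NegotiatingClosingFee; }
        Stage::NotShuttingDown
    }

    fn main() {
        // Both shutdowns exchanged but an HTLC still in flight: ResolvingHTLCs.
        assert_eq!(classify(BOTH_SIDES_SHUTDOWN_MASK, false), Stage::ResolvingHTLCs);
        // Same state once all HTLCs and fee updates have cleared.
        assert_eq!(classify(BOTH_SIDES_SHUTDOWN_MASK, true), Stage::NegotiatingClosingFee);
    }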
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 65f442f1c..d18e6537d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1464,6 +1464,9 @@ pub struct ChannelDetails {
 	///
 	/// [`confirmations_required`]: ChannelDetails::confirmations_required
 	pub is_channel_ready: bool,
+	/// The stage of the channel's shutdown.
+	/// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116.
+	pub channel_shutdown_state: Option<ChannelShutdownState>,
 	/// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
 	/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
 	///
@@ -1551,10 +1554,33 @@ impl ChannelDetails {
 			inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
 			inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
 			config: Some(context.config()),
+			channel_shutdown_state: Some(context.shutdown_state()),
 		}
 	}
 }
 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+/// Further information on the details of the channel shutdown.
+/// Upon channels being forced closed (i.e. commitment transaction confirmation detected
+/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
+/// the channel will be removed shortly.
+/// Also note that in normal operation, peers could disconnect at any of these states
+/// and require peer re-connection before making progress onto other states.
+pub enum ChannelShutdownState {
+	/// Channel has not sent or received a shutdown message.
+	NotShuttingDown,
+	/// Local node has sent a shutdown message for this channel.
+	ShutdownInitiated,
+	/// Shutdown message exchanges have concluded and the channel is in the midst of
+	/// resolving all existing open HTLCs before closing can continue.
+	ResolvingHTLCs,
+	/// All HTLCs have been resolved and the nodes are negotiating the on-chain fee for the closing transaction.
+	NegotiatingClosingFee,
+	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
+	/// is about to drop the channel.
+	ShutdownComplete,
+}
+
 /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
 #[derive(Debug, PartialEq)]
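For API consumers, the new field surfaces through ChannelManager::list_channels(). A rough sketch of reading it from a returned ChannelDetails, assuming the `lightning` crate at 0.0.116 or later; the helper name and the human-readable strings are ours, not part of this patch:

    use lightning::ln::channelmanager::{ChannelDetails, ChannelShutdownState};

    // Given a ChannelDetails entry from ChannelManager::list_channels(),
    // report how far along shutdown the channel is.
    fn describe_shutdown(details: &ChannelDetails) -> &'static str {
        match details.channel_shutdown_state {
            Some(ChannelShutdownState::NotShuttingDown) => "open; no shutdown sent or received",
            Some(ChannelShutdownState::ShutdownInitiated) => "we sent shutdown; awaiting the peer's",
            Some(ChannelShutdownState::ResolvingHTLCs) => "shutdowns exchanged; waiting on in-flight HTLCs",
            Some(ChannelShutdownState::NegotiatingClosingFee) => "negotiating the closing transaction fee",
            Some(ChannelShutdownState::ShutdownComplete) => "closed; entry will disappear from list_channels()",
            None => "unknown (details serialized by LDK prior to 0.0.116)",
        }
    }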
@@ -7349,6 +7375,7 @@ impl Writeable for ChannelDetails {
 			(35, self.inbound_htlc_maximum_msat, option),
 			(37, user_channel_id_high_opt, option),
 			(39, self.feerate_sat_per_1000_weight, option),
+			(41, self.channel_shutdown_state, option),
 		});
 		Ok(())
 	}
@@ -7386,6 +7413,7 @@ impl Readable for ChannelDetails {
 			(35, inbound_htlc_maximum_msat, option),
 			(37, user_channel_id_high_opt, option),
 			(39, feerate_sat_per_1000_weight, option),
+			(41, channel_shutdown_state, option),
 		});
 
 		// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -7421,6 +7449,7 @@ impl Readable for ChannelDetails {
 			inbound_htlc_minimum_msat,
 			inbound_htlc_maximum_msat,
 			feerate_sat_per_1000_weight,
+			channel_shutdown_state,
 		})
 	}
 }
@@ -7971,6 +8000,14 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 	}
 }
 
+impl_writeable_tlv_based_enum!(ChannelShutdownState,
+	(0, NotShuttingDown) => {},
+	(2, ShutdownInitiated) => {},
+	(4, ResolvingHTLCs) => {},
+	(6, NegotiatingClosingFee) => {},
+	(8, ShutdownComplete) => {}, ;
+);
+
 /// Arguments for the creation of a ChannelManager that are not deserialized.
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 64bd679a9..39730b4a9 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1951,6 +1951,16 @@ macro_rules! expect_payment_forwarded {
 	}
 }
 
+#[cfg(test)]
+#[macro_export]
+macro_rules! expect_channel_shutdown_state {
+	($node: expr, $chan_id: expr, $state: path) => {
+		let chan_details = $node.node.list_channels().into_iter().filter(|cd| cd.channel_id == $chan_id).collect::<Vec<ChannelDetails>>();
+		assert_eq!(chan_details.len(), 1);
+		assert_eq!(chan_details[0].channel_shutdown_state, Some($state));
+	}
+}
+
 #[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 pub fn expect_channel_pending_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
 	let events = node.node.get_and_clear_pending_events();
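A note on the serialization choices above: ChannelDetails writes the new field under TLV type 41, an odd type, so readers prior to 0.0.116 skip it and newer readers deserializing old data see None; impl_writeable_tlv_based_enum! pins each variant to a stable wire id (0, 2, 4, 6, 8). A purely illustrative model of that variant-id round trip, not the macro's actual expansion:

    // Illustrative model only: maps variants to the ids declared above and back.
    #[derive(Debug, PartialEq, Clone, Copy)]
    enum State { NotShuttingDown, ShutdownInitiated, ResolvingHTLCs, NegotiatingClosingFee, ShutdownComplete }

    fn to_id(s: State) -> u8 {
        match s {
            State::NotShuttingDown => 0,
            State::ShutdownInitiated => 2,
            State::ResolvingHTLCs => 4,
            State::NegotiatingClosingFee => 6,
            State::ShutdownComplete => 8,
        }
    }

    fn from_id(id: u8) -> Option<State> {
        Some(match id {
            0 => State::NotShuttingDown,
            2 => State::ShutdownInitiated,
            4 => State::ResolvingHTLCs,
            6 => State::NegotiatingClosingFee,
            8 => State::ShutdownComplete,
            _ => return None, // unknown id: the real reader fails with a decode error
        })
    }

    fn main() {
        for s in [State::NotShuttingDown, State::ShutdownComplete] {
            assert_eq!(from_id(to_id(s)), Some(s)); // round trip is lossless
        }
    }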
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index a2ec37a8a..3aa48c1b4 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -12,7 +12,7 @@ use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
 use crate::routing::router::{PaymentParameters, get_route};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
@@ -67,6 +67,169 @@ fn pre_funding_lock_shutdown_test() {
 	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
+#[test]
+fn expect_channel_shutdown_state() {
+	// Test the shutdown state transitions during a clean cooperative close with no
+	// HTLCs in flight.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+	nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+
+	// node1 goes straight to NegotiatingClosingFee since there are no HTLCs in flight. Note that
+	// this doesn't mean node1 has sent or received its closing_signed message yet.
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+
+	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+
+	let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+	let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+	let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+	assert!(node_1_none.is_none());
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	assert!(nodes[1].node.list_channels().is_empty());
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+}
+
+#[test]
+fn expect_channel_shutdown_state_with_htlc() {
+	// Test the shutdown state transitions for a cooperative close while an HTLC is
+	// still in flight over the closing channel.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+	let _chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+	nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+
+	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Claim funds on node2
+	nodes[2].node.claim_funds(payment_preimage_0);
+	check_added_monitors!(nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
+
+	// Fulfil HTLCs on node1 and node0
+	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	assert!(updates.update_add_htlcs.is_empty());
+	assert!(updates.update_fail_htlcs.is_empty());
+	assert!(updates.update_fail_malformed_htlcs.is_empty());
+	assert!(updates.update_fee.is_none());
+	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
+	check_added_monitors!(nodes[1], 1);
+	let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+	// Still in ResolvingHTLCs on chan_1 after the HTLC has been removed on chan_2
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+
+	assert!(updates_2.update_add_htlcs.is_empty());
+	assert!(updates_2.update_fail_htlcs.is_empty());
+	assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+	assert!(updates_2.update_fee.is_none());
+	assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+	expect_payment_sent!(nodes[0], payment_preimage_0);
+
+	// With all HTLCs removed, chan_1 advances to NegotiatingClosingFee
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+
+	// closing_signed negotiation
+	let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+	let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+	let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+	assert!(node_1_none.is_none());
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+
+	// A completed shutdown removes the ChannelDetails entry entirely, so there is no
+	// ShutdownComplete state left to observe here.
+	assert!(nodes[0].node.list_channels().is_empty());
+}
+
+#[test]
+fn expect_channel_shutdown_state_with_force_closure() {
+	// Test the shutdown state when the counterparty force-closes the channel.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+	expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
+
+	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+	assert!(nodes[1].node.list_channels().is_empty());
+
+	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(node_txn.len(), 1);
+	check_spends!(node_txn[0], chan_1.3);
+	mine_transaction(&nodes[0], &node_txn[0]);
+	check_added_monitors!(nodes[0], 1);
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	assert!(nodes[1].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0], true);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+}
+
 #[test]
 fn updates_shutdown_wait() {
 	// Test sending a shutdown with outstanding updates pending
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index a8da26c23..15f43d297 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -2687,7 +2687,8 @@ mod tests {
 			inbound_htlc_minimum_msat: None,
 			inbound_htlc_maximum_msat: None,
 			config: None,
-			feerate_sat_per_1000_weight: None
+			feerate_sat_per_1000_weight: None,
+			channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
 		}
 	}
 
@@ -6758,6 +6759,7 @@ pub(crate) mod bench_utils {
 			inbound_htlc_maximum_msat: None,
 			config: None,
 			feerate_sat_per_1000_weight: None,
+			channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
 		}
 	}
-- 
2.39.5