From 9b5ebc4bec7e8aa01de26b855c0939fcf698c994 Mon Sep 17 00:00:00 2001
From: benthecarman
Date: Tue, 20 Feb 2024 18:47:50 +0000
Subject: [PATCH] Add HTLCsTimedOut closing reason

Previously, a force closure caused by timed-out HTLCs was treated the
same as a user-initiated manual force closure, which led to various UX
issues. This adds a new `ClosureReason` called `HTLCsTimedOut` that
signifies that the closure was caused by the HTLCs timing out.

To go along with this, we previously always sent the ambiguous error
message "Channel force-closed" when force-closing a channel. We now
send the force-closure reason instead, so the peer can know why the
channel was closed.
---
 lightning/src/chain/channelmonitor.rs | 62 ++++++++++++++++++++++++---
 lightning/src/events/mod.rs           |  6 ++-
 lightning/src/ln/channelmanager.rs    | 11 +++--
 lightning/src/ln/functional_tests.rs  | 12 +++---
 lightning/src/ln/monitor_tests.rs     |  4 +-
 lightning/src/ln/reorg_tests.rs       |  4 +-
 6 files changed, 79 insertions(+), 20 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 5c81a5137..4352076e9 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -50,7 +50,7 @@ use crate::chain::Filter;
 use crate::util::logger::{Logger, Record};
 use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
 use crate::util::byte_utils;
-use crate::events::{Event, EventHandler};
+use crate::events::{ClosureReason, Event, EventHandler};
 use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
 
 use crate::prelude::*;
@@ -155,6 +155,17 @@ pub enum MonitorEvent {
 	/// A monitor event containing an HTLCUpdate.
 	HTLCEvent(HTLCUpdate),
 
+	/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+	/// channel. Holds information about the channel and why it was closed.
+	HolderForceClosedWithInfo {
+		/// The reason the channel was closed.
+		reason: ClosureReason,
+		/// The funding outpoint of the channel.
+		outpoint: OutPoint,
+		/// The channel ID of the channel.
+		channel_id: ChannelId,
+	},
+
 	/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
 	/// channel.
 	HolderForceClosed(OutPoint),
@@ -184,6 +195,11 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 		(2, monitor_update_id, required),
 		(4, channel_id, required),
 	},
+	(5, HolderForceClosedWithInfo) => {
+		(0, reason, upgradable_required),
+		(2, outpoint, required),
+		(4, channel_id, required),
+	},
 	;
 	(2, HTLCEvent),
 	(4, HolderForceClosed),
@@ -1059,6 +1075,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 		writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
 			MonitorEvent::HTLCEvent(_) => true,
 			MonitorEvent::HolderForceClosed(_) => true,
+			MonitorEvent::HolderForceClosedWithInfo { .. } => true,
 			_ => false,
 		}).count() as u64).to_be_bytes())?;
 		for event in self.pending_monitor_events.iter() {
@@ -1068,6 +1085,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 					upd.write(writer)?;
 				},
 				MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+				// `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+				// backwards compatibility, we write a `HolderForceClosed` event along with the
+				// `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+				MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
 				_ => {}, // Covered in the TLV writes below
 			}
 		}
@@ -1099,10 +1120,23 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 		self.lockdown_from_offchain.write(writer)?;
 		self.holder_tx_signed.write(writer)?;
 
+		// `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+		// backwards compatibility, we write a `HolderForceClosed` event along with the
+		// `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+		let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+			MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+			_ => false,
+		}) {
+			Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+				let mut pending_monitor_events = self.pending_monitor_events.clone();
+				pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+				pending_monitor_events
+			}
+			_ => self.pending_monitor_events.clone(),
+		};
+
 		write_tlv_fields!(writer, {
 			(1, self.funding_spend_confirmed, option),
 			(3, self.htlcs_resolved_on_chain, required_vec),
-			(5, self.pending_monitor_events, required_vec),
+			(5, pending_monitor_events, required_vec),
 			(7, self.funding_spend_seen, required),
 			(9, self.counterparty_node_id, option),
 			(11, self.confirmed_commitment_tx_counterparty_output, option),
@@ -2727,7 +2761,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		}
 	}
 
-	fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+	fn generate_claimable_outpoints_and_watch_outputs(&mut self, reason: ClosureReason) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
 		let funding_outp = HolderFundingOutput::build(
 			self.funding_redeemscript.clone(),
 			self.channel_value_satoshis,
@@ -2739,7 +2773,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			self.best_block.height, self.best_block.height
 		);
 		let mut claimable_outpoints = vec![commitment_package];
-		self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+		let event = MonitorEvent::HolderForceClosedWithInfo {
+			reason,
+			outpoint: self.funding_info.0,
+			channel_id: self.channel_id,
+		};
+		self.pending_monitor_events.push(event);
+
 		// Although we aren't signing the transaction directly here, the transaction will be signed
 		// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
 		// new channel updates.
@@ -2775,7 +2815,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+		let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
 		self.onchain_tx_handler.update_claims_view_from_requests(
 			claimable_outpoints, self.best_block.height, self.best_block.height,
 			broadcaster, fee_estimator, logger
@@ -3778,7 +3818,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
 		let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
 		if should_broadcast {
-			let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+			let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HTLCsTimedOut);
 			claimable_outpoints.append(&mut new_outpoints);
 			watch_outputs.append(&mut new_outputs);
 		}
@@ -4605,6 +4645,16 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> for (BlockHash, ChannelMonitor<SP::EcdsaSigner>) {
 			(19, channel_id, option),
 		});
 
+		// `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+		// events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+		if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+			if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+				pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+			{
+				pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+			}
+		}
+
 		// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
 		// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
 		// give them a chance to recognize the spendable output.
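
The writer and reader hunks above form a small compatibility round-trip: older readers cannot understand `HolderForceClosedWithInfo` (its odd variant number marks it as okay to skip), so the writer also emits a legacy `HolderForceClosed`, and newer readers drop that duplicate in favor of the richer event. A minimal, self-contained sketch of the same pattern, with simplified stand-in types (`MonitorEventSketch` and the `u64` outpoint are illustrative only, not LDK's actual definitions):

// Stand-in for the two MonitorEvent variants involved in the compat scheme.
#[derive(Clone, Debug, PartialEq)]
enum MonitorEventSketch {
    HolderForceClosed(u64),                      // legacy: funding outpoint only
    HolderForceClosedWithInfo { outpoint: u64 }, // new: carries extra closure info
}

// On write: if the new variant is pending, also emit the legacy variant so
// pre-0.0.122 readers still learn about the force-closure.
fn events_to_write(pending: &[MonitorEventSketch]) -> Vec<MonitorEventSketch> {
    let mut out = pending.to_vec();
    if let Some(MonitorEventSketch::HolderForceClosedWithInfo { outpoint }) =
        pending.iter().find(|e| matches!(e, MonitorEventSketch::HolderForceClosedWithInfo { .. }))
    {
        out.push(MonitorEventSketch::HolderForceClosed(*outpoint));
    }
    out
}

// On read: new readers see both variants and keep only the richer one.
fn dedup_on_read(mut read: Vec<MonitorEventSketch>) -> Vec<MonitorEventSketch> {
    if read.iter().any(|e| matches!(e, MonitorEventSketch::HolderForceClosedWithInfo { .. })) {
        read.retain(|e| !matches!(e, MonitorEventSketch::HolderForceClosed(_)));
    }
    read
}

fn main() {
    let pending = vec![MonitorEventSketch::HolderForceClosedWithInfo { outpoint: 42 }];
    let written = events_to_write(&pending);
    assert_eq!(written.len(), 2);                // both variants hit the wire
    assert_eq!(dedup_on_read(written), pending); // a new reader recovers the original
}

The asserts in `main` mirror exactly what the patch relies on: the legacy event rides alongside the new one on disk, and deserialization collapses the pair back to a single event.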
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 485a23e02..bc963ef2e 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -232,6 +232,8 @@ pub enum ClosureReason {
 	/// Another channel in the same funding batch closed before the funding transaction
 	/// was ready to be broadcast.
 	FundingBatchClosure,
+	/// One of our HTLCs timed out in a channel, causing us to force close the channel.
+	HTLCsTimedOut,
 }
 
 impl core::fmt::Display for ClosureReason {
@@ -241,7 +243,7 @@ impl core::fmt::Display for ClosureReason {
 			ClosureReason::CounterpartyForceClosed { peer_msg } => {
 				f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
 			},
-			ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
+			ClosureReason::HolderForceClosed => f.write_str("user force-closed the channel"),
 			ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
 			ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
 			ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
@@ -255,6 +257,7 @@ impl core::fmt::Display for ClosureReason {
 			ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
 			ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
 			ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
+			ClosureReason::HTLCsTimedOut => f.write_str("htlcs on the channel timed out"),
 		}
 	}
 }
@@ -272,6 +275,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
 	(15, FundingBatchClosure) => {},
 	(17, CounterpartyInitiatedCooperativeClosure) => {},
 	(19, LocallyInitiatedCooperativeClosure) => {},
+	(21, HTLCsTimedOut) => {},
 );
 
 /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 6426f0925..64bfcec51 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -7374,7 +7374,7 @@ where
 						self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 					}
 				},
-				MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+				MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
 					let counterparty_node_id_opt = match counterparty_node_id {
 						Some(cp_id) => Some(cp_id),
 						None => {
@@ -7392,7 +7392,12 @@ where
 							let pending_msg_events = &mut peer_state.pending_msg_events;
 							if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
 								if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-									failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+										reason
+									} else {
+										ClosureReason::HolderForceClosed
+									};
+									failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
 									if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 										pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 											msg: update
@@ -7401,7 +7406,7 @@ where
 									pending_msg_events.push(events::MessageSendEvent::HandleError {
 										node_id: chan.context.get_counterparty_node_id(),
 										action: msgs::ErrorAction::DisconnectPeer {
-											msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+											msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
 										},
 									});
 								}
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index c5ea582b2..8c8be0b83 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2417,7 +2417,7 @@ fn channel_monitor_network_test() {
 	}
 	check_added_monitors!(nodes[4], 1);
 	test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
-	check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
 
 	mine_transaction(&nodes[4], &node_txn[0]);
 	check_preimage_claim(&nodes[4], &node_txn);
@@ -2430,7 +2430,7 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
 		Ok(ChannelMonitorUpdateStatus::Completed));
-	check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -5682,7 +5682,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 	test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5713,7 +5713,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 	test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5759,7 +5759,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
 		test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
 	} else {
 		expect_payment_failed!(nodes[0], our_payment_hash, true);
 	}
@@ -8654,7 +8654,7 @@ fn test_concurrent_monitor_claim() {
 	let height = HTLC_TIMEOUT_BROADCAST + 1;
 	connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
 	check_closed_broadcast(&nodes[0], 1, true);
-	check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+	check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
 		[nodes[1].node.get_our_node_id()], 100000);
 	watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
 	check_added_monitors(&nodes[0], 1);
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 25084689f..d5f0dc153 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -1188,14 +1188,14 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_spend_first: bool) {
 	assert!(failed_payments.is_empty());
 	if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
 	match &events[1] {
-		Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
+		Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {},
 		_ => panic!(),
 	}
 
 	connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
 
 	// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
 	// lists the two on-chain timeout-able HTLCs as claimable balances.
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index a31d9520d..62c82b01f 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -465,7 +465,7 @@ fn test_set_outpoints_partial_claiming() {
 	// Connect blocks on node B
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
 	check_added_monitors!(nodes[1], 1);
 	// Verify node B broadcast 2 HTLC-timeout txn
 	let partial_claim_tx = {
@@ -807,7 +807,7 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	check_closed_broadcast(&nodes[0], 1, true);
 	check_added_monitors(&nodes[0], 1);
-	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+	check_closed_event(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, &[nodes[1].node.get_our_node_id()], 100_000);
 	{
 		let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
-- 
2.39.5
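
For downstream consumers, the visible effect of this patch is a more specific `Event::ChannelClosed` reason (plus the clearer error message sent to the peer). A minimal sketch of how an application might branch on it; `handle_event` and the log strings are hypothetical application code, while `Event::ChannelClosed`, `ClosureReason::HTLCsTimedOut`, and `ClosureReason::HolderForceClosed` are the real types touched by this patch:

use lightning::events::{ClosureReason, Event};

// Hypothetical application-side handler: an on-chain closure forced by
// timed-out HTLCs can now be surfaced differently from a user-requested
// force-close, which were previously indistinguishable.
fn handle_event(event: Event) {
    if let Event::ChannelClosed { reason, .. } = event {
        match reason {
            ClosureReason::HTLCsTimedOut =>
                eprintln!("channel force-closed on-chain: HTLCs timed out"),
            ClosureReason::HolderForceClosed =>
                eprintln!("channel force-closed at user request"),
            // `ClosureReason` implements `Display`, so other reasons log cleanly.
            other => eprintln!("channel closed: {}", other),
        }
    }
}

Matching on `reason` rather than parsing the peer's error string also keeps such handling robust to the message-text change in `channelmanager.rs` above.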