From: Matt Corallo Date: Sat, 7 Sep 2024 15:46:50 +0000 (+0000) Subject: Aggregate packages with differing, but similar, locktimes X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=4b7a8ef8fc21604d403f1d6fc6cb42382b1c0e79;p=rust-lightning Aggregate packages with differing, but similar, locktimes In the previous commit we started aggregating claims much more aggressively but only did so when timelocks on HTLC timeout claims were identical. While there's nothing we can do to change this for HTLC-Timeout claims (on our commitment transactions), on counterparty commitment transactions there's no such restriction. However, we do need to avoid aggregating different claims with drastically different locktimes to avoid delaying claims too long, and thus introduce a new constant, `CLTV_DIFFERENCE_BATCH_CLAIM`, to limit this. This new batching has security implications as we may delay claiming an HTLC for up to that many blocks, and thus we make the constant public, update the `cltv_expiry_delta` docs, and update some constant security checks. --- diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 18693c8e9..5a36a55a4 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -233,6 +233,18 @@ pub(crate) const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12; /// expiring at the same time. const _HTLCS_NOT_PINNABLE_ON_CLOSE: u32 = CLTV_CLAIM_BUFFER - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; +/// When spending two outputs which need CLTVs this many blocks apart or less, we are willing to +/// delay claiming the first one in order to batch the claim. +/// +/// This may imply that an HTLC which has expired on-chain is not claimed for up to this many +/// blocks. +pub const CLTV_DIFFERENCE_BATCH_CLAIM: u32 = 6; +/// If we delay an HTLC claim a few blocks to do a batch claim, make sure that we still have plenty +/// of room to get our claim transactions confirmed (i.e. 
that we have at least +/// [`CLTV_CLAIM_BUFFER`] blocks to get it confirmed). +const _BATCH_CANT_CAUSE_HTLC_CLAIMS_TO_NOT_CONFIRM: u32 = + crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA as u32 - CLTV_DIFFERENCE_BATCH_CLAIM - CLTV_CLAIM_BUFFER; + /// If an HTLC expires within this many blocks, force-close the channel to broadcast the /// HTLC-Success transaction. /// In other words, this is an upper bound on how many blocks we think it can take us to get a diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index be6156aec..00a8fbfd7 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -30,7 +30,7 @@ use crate::ln::features::ChannelTypeFeatures; use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint}; use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA; use crate::ln::msgs::DecodeError; -use crate::chain::channelmonitor::COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; +use crate::chain::channelmonitor::{COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, CLTV_DIFFERENCE_BATCH_CLAIM}; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW}; use crate::chain::transaction::MaybeSignedTransaction; use crate::sign::ecdsa::EcdsaChannelSigner; @@ -829,9 +829,53 @@ impl PackageTemplate { return false; } - // First, check if the two packages have compatible locktimes. - // We only want to aggregate claims if they have the same locktime. - if self.package_locktime(cur_height) != other.package_locktime(cur_height) { + // First check for claims which have a fixed absolute locktime. + // HTLC-Success and HTLC-Timeout claims, even with anchors, have a fixed locktime + // and cannot be aggregated with claims requiring a different locktime. 
+ let mut self_fixed_locktime = None; + for (_, input) in self.inputs.iter() { + if let Some(cltv) = input.signed_locktime() { + assert!(self_fixed_locktime.is_none() || self_fixed_locktime == Some(cltv)); + self_fixed_locktime = Some(cltv); + #[cfg(not(debug_assertions))] + break; + } + } + + let mut other_fixed_locktime = None; + for (_, input) in other.inputs.iter() { + if let Some(cltv) = input.signed_locktime() { + assert!(other_fixed_locktime.is_none() || other_fixed_locktime == Some(cltv)); + other_fixed_locktime = Some(cltv); + #[cfg(not(debug_assertions))] + break; + } + } + + if self_fixed_locktime != other_fixed_locktime { + return false; + } + + // Then check if the two packages have compatible locktime ranges. + // We only want to aggregate claims if they're within `CLTV_DIFFERENCE_BATCH_CLAIM` + // of each other. + let mut self_min_locktime = u32::MAX; + let mut self_max_locktime = 0; + for locktime in self.inputs.iter().map(|(_, input)| input.absolute_tx_timelock(cur_height)) { + self_min_locktime = cmp::min(self_min_locktime, locktime); + self_max_locktime = cmp::max(self_max_locktime, locktime); + } + let mut other_min_locktime = u32::MAX; + let mut other_max_locktime = 0; + for locktime in other.inputs.iter().map(|(_, input)| input.absolute_tx_timelock(cur_height)) { + other_min_locktime = cmp::min(other_min_locktime, locktime); + other_max_locktime = cmp::max(other_max_locktime, locktime); + } + + if self_min_locktime + CLTV_DIFFERENCE_BATCH_CLAIM < other_max_locktime { + return false; + } + if other_min_locktime + CLTV_DIFFERENCE_BATCH_CLAIM < self_max_locktime { return false; } @@ -850,6 +894,10 @@ impl PackageTemplate { let other_unpinnable = self_cluster == AggregationCluster::Unpinnable && other.counterparty_spendable_height() < cur_height - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; if self_unpinnable && other_unpinnable { + debug_assert_eq!(self_min_locktime, cur_height, "Unpinnable clusters have no locktime"); + 
debug_assert_eq!(self_max_locktime, cur_height, "Unpinnable clusters have no locktime"); + debug_assert_eq!(other_min_locktime, cur_height, "Unpinnable clusters have no locktime"); + debug_assert_eq!(other_max_locktime, cur_height, "Unpinnable clusters have no locktime"); return true; } self_cluster == other_cluster && self.counterparty_spendable_height() == other.counterparty_spendable_height() diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b3047930f..673e3a230 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2454,12 +2454,14 @@ pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3; // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS, // ie that if the next-hop peer fails the HTLC within -// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain, -// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and -// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before -// LATENCY_GRACE_PERIOD_BLOCKS. -#[allow(dead_code)] -const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS; +// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER +// (+CLTV_DIFFERENCE_BATCH_CLAIM) left to timeout on chain with ANTI_REORG_DELAY blocks +// thereafter to wait for reorg-safety before we have to fail the inbound HTLC backwards within +// LATENCY_GRACE_PERIOD_BLOCKS (preventing our counterparty from force-closing on us). 
+const _CHECK_CLTV_EXPIRY_SANITY: u32 = + MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - + crate::chain::channelmonitor::CLTV_DIFFERENCE_BATCH_CLAIM - + ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS; // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See // ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed. diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 3a0885de7..0863d9c31 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -454,12 +454,17 @@ pub struct ChannelConfig { /// enough time to broadcast and confirm a transaction, possibly with time in between to RBF /// the spending transaction). /// + /// Note that on-chain HTLC claims may be delayed by [`CLTV_DIFFERENCE_BATCH_CLAIM`] in order + /// to batch claims, and thus you might wish to consider adding that value to your desired + /// delta. + /// /// Default value: `72` (12 hours at an average of 6 blocks/hour) /// /// Minimum value: [`MIN_CLTV_EXPIRY_DELTA`] (Any values less than this will be treated as /// [`MIN_CLTV_EXPIRY_DELTA`] instead.) /// /// [`MIN_CLTV_EXPIRY_DELTA`]: crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA + /// [`CLTV_DIFFERENCE_BATCH_CLAIM`]: crate::chain::channelmonitor::CLTV_DIFFERENCE_BATCH_CLAIM pub cltv_expiry_delta: u16, /// Limit our total exposure to potential loss to on-chain fees on close, including in-flight /// HTLCs which are burned to fees as they are too small to claim on-chain and fees on