From: Matt Corallo Date: Fri, 6 Sep 2024 18:00:44 +0000 (+0000) Subject: Move `PackageTemplate` merging decisions entirely into `package.rs` X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=09bab0ec61a04e53cf27e102b36fecd6b2cace7a;p=rust-lightning Move `PackageTemplate` merging decisions entirely into `package.rs` Currently our package merging logic is strewn about between `package.rs` (which decides various flags based on the package type) and `onchaintx.rs` (which does the actual merging based on the derived flags as well as its own logic), making the logic hard to follow. Instead, here we consolidate the package merging logic entirely into `package.rs` with a new `PackageTemplate::can_merge_with` method that decides if merging can happen. We also simplify the merge pass in `update_claims_view_from_requests` to try to maximally merge by testing each pair of `PackageTemplate`s we're given to see if they can be merged. This is overly complicated (and inefficient) for today's merge logic, but over the coming commits we'll expand when we can merge and not having to think about the merge pass' behavior makes that much simpler (and O(N^2) for <1000 elements done only once when a commitment transaction confirms is fine). 
--- diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 1cba3efb9..9ebf02252 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -31,7 +31,7 @@ use crate::ln::types::PaymentPreimage; use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction}; use crate::chain::ClaimId; use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER}; +use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::package::{PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; use crate::util::logger::Logger; @@ -726,7 +726,7 @@ impl OnchainTxHandler { /// does not need to equal the current blockchain tip height, which should be provided via /// `cur_height`, however it must never be higher than `cur_height`. pub(super) fn update_claims_view_from_requests( - &mut self, requests: Vec, conf_height: u32, cur_height: u32, + &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) where @@ -737,48 +737,50 @@ impl OnchainTxHandler { log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len()); } - let mut preprocessed_requests = Vec::with_capacity(requests.len()); - let mut aggregated_request = None; - - // Try to aggregate outputs if their timelock expiration isn't imminent (package timelock - // <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable). 
- for req in requests { - // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while - if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { + // First drop any claims which are duplicate + requests.retain(|req| { + if self.claimable_outpoints.get(req.outpoints()[0]).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); + false } else { let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() .find(|locked_package| locked_package.outpoints() == req.outpoints()); if let Some(package) = timelocked_equivalent_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height)); - continue; + false + } else { + true } + } + }); - let package_locktime = req.package_locktime(cur_height); - if package_locktime > cur_height + 1 { - log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height); - for outpoint in req.outpoints() { - log_info!(logger, " Outpoint {}", outpoint); - } - self.locktimed_packages.entry(package_locktime).or_default().push(req); - continue; + // Then try to maximally aggregate `requests` + for i in (1..requests.len()).rev() { + for j in 0..i { + if requests[i].can_merge_with(&requests[j], cur_height) { + let merge = requests.remove(i); + requests[j].merge_package(merge); + break; } + } + } - log_trace!(logger, "Test if outpoint which our counterparty can spend at {} against aggregation limit {}", req.counterparty_spendable_height(), cur_height + CLTV_SHARED_CLAIM_BUFFER); - if req.counterparty_spendable_height() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() { - // Don't 
aggregate if outpoint package timelock is soon or marked as non-aggregable - preprocessed_requests.push(req); - } else if aggregated_request.is_none() { - aggregated_request = Some(req); - } else { - aggregated_request.as_mut().unwrap().merge_package(req); + let mut preprocessed_requests = Vec::with_capacity(requests.len()); + + // Finally, split requests into timelocked ones and immediately-spendable ones. + for req in requests { + let package_locktime = req.package_locktime(cur_height); + if package_locktime > cur_height + 1 { + log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height); + for outpoint in req.outpoints() { + log_info!(logger, " Outpoint {}", outpoint); } + self.locktimed_packages.entry(package_locktime).or_default().push(req); + } else { + preprocessed_requests.push(req); } } - if let Some(req) = aggregated_request { - preprocessed_requests.push(req); - } // Claim everything up to and including `cur_height` let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1)); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 635837bf8..f66585a44 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -30,6 +30,7 @@ use crate::ln::features::ChannelTypeFeatures; use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint}; use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA; use crate::ln::msgs::DecodeError; +use crate::chain::channelmonitor::CLTV_SHARED_CLAIM_BUFFER; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW}; use crate::chain::transaction::MaybeSignedTransaction; use crate::sign::ecdsa::EcdsaChannelSigner; @@ -756,6 +757,12 @@ pub struct PackageTemplate { } impl PackageTemplate { + pub(crate) fn can_merge_with(&self, other: &PackageTemplate, 
cur_height: u32) -> bool { + self.aggregable() && other.aggregable() && + self.package_locktime(cur_height) == other.package_locktime(cur_height) && + self.counterparty_spendable_height() > cur_height + CLTV_SHARED_CLAIM_BUFFER && + other.counterparty_spendable_height() > cur_height + CLTV_SHARED_CLAIM_BUFFER + } pub(crate) fn is_malleable(&self) -> bool { self.malleability == PackageMalleability::Malleable } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index c2a32d3c9..523b862ae 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -7651,7 +7651,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { // Verify claim tx are spending revoked HTLC txn // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0] - // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn + // Note that node_txn[1] and node_txn[2] are bogus - they double spend the revoked_htlc_txn // which are included in the same block (they are broadcasted because we scan the // transactions linearly and generate claims as we go, they likely should be removed in the // future). @@ -7668,8 +7668,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output); assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output); - assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output); - assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output); + assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output); + assert_eq!(node_txn[2].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output); // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one // output, checked above). 
@@ -7681,7 +7681,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { // Store both feerates for later comparison let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value; feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu(); - penalty_txn = vec![node_txn[2].clone()]; + penalty_txn = vec![node_txn[0].clone()]; node_txn.clear(); }