use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction};
use crate::chain::ClaimId;
use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
+use crate::chain::channelmonitor::ANTI_REORG_DELAY;
use crate::chain::package::{PackageSolvingData, PackageTemplate};
use crate::chain::transaction::MaybeSignedTransaction;
use crate::util::logger::Logger;
/// does not need to equal the current blockchain tip height, which should be provided via
/// `cur_height`, however it must never be higher than `cur_height`.
pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
- &mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
+ &mut self, mut requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
broadcaster: &B, conf_target: ConfirmationTarget,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) where
log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
}
- let mut preprocessed_requests = Vec::with_capacity(requests.len());
- let mut aggregated_request = None;
-
- // Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
- // <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
- for req in requests {
- // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
- if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
+	// First drop any claims which are duplicates
+ requests.retain(|req| {
+ if self.claimable_outpoints.get(req.outpoints()[0]).is_some() {
log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
+ false
} else {
let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
.find(|locked_package| locked_package.outpoints() == req.outpoints());
if let Some(package) = timelocked_equivalent_package {
log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
- continue;
+ false
+ } else {
+ true
}
+ }
+ });
- let package_locktime = req.package_locktime(cur_height);
- if package_locktime > cur_height + 1 {
- log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
- for outpoint in req.outpoints() {
- log_info!(logger, " Outpoint {}", outpoint);
- }
- self.locktimed_packages.entry(package_locktime).or_default().push(req);
- continue;
+ // Then try to maximally aggregate `requests`
+ for i in (1..requests.len()).rev() {
+ for j in 0..i {
+ if requests[i].can_merge_with(&requests[j], cur_height) {
+ let merge = requests.remove(i);
+ requests[j].merge_package(merge);
+ break;
}
+ }
+ }
- log_trace!(logger, "Test if outpoint which our counterparty can spend at {} against aggregation limit {}", req.counterparty_spendable_height(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
- if req.counterparty_spendable_height() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
- // Don't aggregate if outpoint package timelock is soon or marked as non-aggregable
- preprocessed_requests.push(req);
- } else if aggregated_request.is_none() {
- aggregated_request = Some(req);
- } else {
- aggregated_request.as_mut().unwrap().merge_package(req);
+ let mut preprocessed_requests = Vec::with_capacity(requests.len());
+
+ // Finally, split requests into timelocked ones and immediately-spendable ones.
+ for req in requests {
+ let package_locktime = req.package_locktime(cur_height);
+ if package_locktime > cur_height + 1 {
+ log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
+ for outpoint in req.outpoints() {
+ log_info!(logger, " Outpoint {}", outpoint);
}
+ self.locktimed_packages.entry(package_locktime).or_default().push(req);
+ } else {
+ preprocessed_requests.push(req);
}
}
- if let Some(req) = aggregated_request {
- preprocessed_requests.push(req);
- }
// Claim everything up to and including `cur_height`
let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1));
use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA;
use crate::ln::msgs::DecodeError;
+use crate::chain::channelmonitor::CLTV_SHARED_CLAIM_BUFFER;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW};
use crate::chain::transaction::MaybeSignedTransaction;
use crate::sign::ecdsa::EcdsaChannelSigner;
}
impl PackageTemplate {
+	/// Returns whether `self` and `other` can be aggregated into a single claiming
+	/// package. Two packages are mergeable only when:
+	/// * both are flagged aggregable,
+	/// * both resolve to the same package locktime at `cur_height` (a merged
+	///   transaction can only carry one nLockTime), and
+	/// * for both, the height at which the counterparty can spend the outputs is
+	///   still more than `CLTV_SHARED_CLAIM_BUFFER` blocks beyond `cur_height` —
+	///   claims close to being spendable by the counterparty are kept separate
+	///   (presumably so a race on one input cannot invalidate the whole batch;
+	///   confirm against `CLTV_SHARED_CLAIM_BUFFER` docs).
+	pub(crate) fn can_merge_with(&self, other: &PackageTemplate, cur_height: u32) -> bool {
+		self.aggregable() && other.aggregable() &&
+		self.package_locktime(cur_height) == other.package_locktime(cur_height) &&
+		self.counterparty_spendable_height() > cur_height + CLTV_SHARED_CLAIM_BUFFER &&
+		other.counterparty_spendable_height() > cur_height + CLTV_SHARED_CLAIM_BUFFER
+	}
	/// Returns `true` iff this package was built as
	/// `PackageMalleability::Malleable`.
	// NOTE(review): "malleable" presumably means the package's inputs may be
	// split across or merged into other claim transactions — confirm against
	// the `PackageMalleability` enum's documentation.
	pub(crate) fn is_malleable(&self) -> bool {
		self.malleability == PackageMalleability::Malleable
	}
// Verify claim tx are spending revoked HTLC txn
// node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
- // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
+ // Note that node_txn[1] and node_txn[2] are bogus - they double spend the revoked_htlc_txn
// which are included in the same block (they are broadcasted because we scan the
// transactions linearly and generate claims as we go, they likely should be removed in the
// future).
assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
- assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
- assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
+ assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
+ assert_eq!(node_txn[2].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
// node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
// output, checked above).
// Store both feerates for later comparison
let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
- penalty_txn = vec![node_txn[2].clone()];
+ penalty_txn = vec![node_txn[0].clone()];
node_txn.clear();
}