X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fonchaintx.rs;h=cd0cb08eab3522c00a057a7e9043c45c6a2a43a0;hb=b8ed4d2608e32128dd5a1dee92911638a4301138;hp=faf3fe12f60b1787494d803b77ce6abbe31b6017;hpb=7b9c28a02113fe1f36d2b4d332f52fa898f8ba2f;p=rust-lightning

diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index faf3fe12..cd0cb08e 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -12,6 +12,8 @@
 //! OnchainTxHandler objects are fully-part of ChannelMonitor and encapsulates all
 //! building, tracking, bumping and notifications functions.
 
+#[cfg(anchors)]
+use bitcoin::PackedLockTime;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::script::Script;
@@ -201,6 +203,7 @@ pub(crate) enum ClaimEvent {
 	BumpHTLC {
 		target_feerate_sat_per_1000_weight: u32,
 		htlcs: Vec<ExternalHTLCClaim>,
+		tx_lock_time: PackedLockTime,
 	},
 }
 
@@ -248,8 +251,19 @@ pub struct OnchainTxHandler {
 	pub(crate) pending_claim_requests: HashMap<PackageID, PackageTemplate>,
 	#[cfg(not(test))]
 	pending_claim_requests: HashMap<PackageID, PackageTemplate>,
+
+	// Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec`
+	// essentially acts as an insertion-ordered `HashMap` – there should only ever be one occurrence
+	// of a `PackageID`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and
+	// a new block has been connected, resulting in a new claim, the previous will be replaced with
+	// the new.
+	//
+	// These external events may be generated in the following cases:
+	// - A channel has been force closed by broadcasting the holder's latest commitment transaction
+	// - A block being connected/disconnected
+	// - Learning the preimage for an HTLC we can claim onchain
 	#[cfg(anchors)]
-	pending_claim_events: HashMap<PackageID, ClaimEvent>,
+	pending_claim_events: Vec<(PackageID, ClaimEvent)>,
 
 	// Used to link outpoints claimed in a connected block to a pending claim request. The keys
 	// represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to
@@ -426,7 +440,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			pending_claim_requests,
 			onchain_events_awaiting_threshold_conf,
 			#[cfg(anchors)]
-			pending_claim_events: HashMap::new(),
+			pending_claim_events: Vec::new(),
 			secp_ctx,
 		})
 	}
@@ -447,8 +461,7 @@ impl OnchainTxHandler
 			locktimed_packages: BTreeMap::new(),
 			onchain_events_awaiting_threshold_conf: Vec::new(),
 			#[cfg(anchors)]
-			pending_claim_events: HashMap::new(),
-
+			pending_claim_events: Vec::new(),
 			secp_ctx,
 		}
 	}
@@ -463,9 +476,9 @@ impl OnchainTxHandler
 
 	#[cfg(anchors)]
 	pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
-		let mut ret = HashMap::new();
-		swap(&mut ret, &mut self.pending_claim_events);
-		ret.into_iter().map(|(_, event)| event).collect::<Vec<_>>()
+		let mut events = Vec::new();
+		swap(&mut events, &mut self.pending_claim_events);
+		events.into_iter().map(|(_, event)| event).collect()
 	}
 
 	/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
@@ -476,7 +489,7 @@ impl OnchainTxHandler
 	///
 	/// Panics if there are signing errors, because signing operations in reaction to on-chain
 	/// events are not expected to fail, and if they do, we may lose funds.
-	fn generate_claim<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(Option<u32>, u64, OnchainClaim)>
+	fn generate_claim<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u32, u64, OnchainClaim)>
 		where F::Target: FeeEstimator,
 					L::Target: Logger,
 	{
@@ -520,7 +533,7 @@ impl OnchainTxHandler
 
 		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
 		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
-		let new_timer = Some(cached_request.get_height_timer(cur_height));
+		let new_timer = cached_request.get_height_timer(cur_height);
 		if cached_request.is_malleable() {
 			#[cfg(anchors)]
 			{ // Attributes are not allowed on if expressions on our current MSRV of 1.41.
@@ -534,6 +547,7 @@ impl OnchainTxHandler
 					OnchainClaim::Event(ClaimEvent::BumpHTLC {
 						target_feerate_sat_per_1000_weight,
 						htlcs,
+						tx_lock_time: PackedLockTime(cached_request.package_locktime(cur_height)),
 					}),
 				));
 			} else {
@@ -548,8 +562,10 @@ impl OnchainTxHandler
 			) {
 				assert!(new_feerate != 0);
 
-				let transaction = cached_request.finalize_malleable_package(self, output_value, self.destination_script.clone(), logger).unwrap();
-				log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
+				let transaction = cached_request.finalize_malleable_package(
+					cur_height, self, output_value, self.destination_script.clone(), logger
+				).unwrap();
+				log_trace!(logger, "...with timer {} and feerate {}", new_timer, new_feerate);
 				assert!(predicted_weight >= transaction.weight());
 				return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)));
 			}
@@ -567,7 +583,7 @@ impl OnchainTxHandler
 				None => return None,
 			};
 			if !cached_request.requires_external_funding() {
-				return Some((None, 0, OnchainClaim::Tx(tx)));
+				return Some((new_timer, 0, OnchainClaim::Tx(tx)));
 			}
 			#[cfg(anchors)]
 			return inputs.find_map(|input| match input {
@@ -600,7 +616,7 @@ impl OnchainTxHandler
 						// attempt to broadcast the transaction with its current fee rate and hope
 						// it confirms. This is essentially the same behavior as a commitment
 						// transaction without anchor outputs.
-					None => Some((None, 0, OnchainClaim::Tx(tx.clone()))),
+					None => Some((new_timer, 0, OnchainClaim::Tx(tx.clone()))),
 				}
 			},
 			_ => {
@@ -644,16 +660,17 @@ impl OnchainTxHandler
 				.find(|locked_package| locked_package.outpoints() == req.outpoints());
 			if let Some(package) = timelocked_equivalent_package {
 				log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
-					req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
+					req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
 				continue;
 			}
 
-			if req.package_timelock() > cur_height + 1 {
-				log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), cur_height);
+			let package_locktime = req.package_locktime(cur_height);
+			if package_locktime > cur_height + 1 {
+				log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
 				for outpoint in req.outpoints() {
 					log_info!(logger, " Outpoint {}", outpoint);
 				}
-				self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
+				self.locktimed_packages.entry(package_locktime).or_insert(Vec::new()).push(req);
 				continue;
 			}
 
@@ -709,7 +726,8 @@ impl OnchainTxHandler
 						package_id
 					},
 				};
-				self.pending_claim_events.insert(package_id, claim_event);
+				debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == package_id).count(), 0);
+				self.pending_claim_events.push((package_id, claim_event));
 				package_id
 			},
 		};
@@ -794,6 +812,20 @@ impl OnchainTxHandler
 						//TODO: recompute soonest_timelock to avoid wasting a bit on fees
 						if at_least_one_drop {
 							bump_candidates.insert(*package_id, request.clone());
+							// If we have any pending claim events for the request being updated
+							// that have yet to be consumed, we'll remove them since they will
+							// end up producing an invalid transaction by double spending
+							// input(s) that already have a confirmed spend. If such spend is
+							// reorged out of the chain, then we'll attempt to re-spend the
+							// inputs once we see it.
+ #[cfg(anchors)] { + #[cfg(debug_assertions)] { + let existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == *package_id).count(); + assert!(existing == 0 || existing == 1); + } + self.pending_claim_events.retain(|entry| entry.0 != *package_id); + } } } break; //No need to iterate further, either tx is our or their @@ -829,8 +861,14 @@ impl OnchainTxHandler log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.", outpoint, log_bytes!(package_id)); self.claimable_outpoints.remove(outpoint); - #[cfg(anchors)] - self.pending_claim_events.remove(&package_id); + } + #[cfg(anchors)] { + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == package_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|(id, _)| *id != package_id); } } }, @@ -847,10 +885,8 @@ impl OnchainTxHandler // Check if any pending claim request must be rescheduled for (package_id, request) in self.pending_claim_requests.iter() { - if let Some(h) = request.timer() { - if cur_height >= h { - bump_candidates.insert(*package_id, request.clone()); - } + if cur_height >= request.timer() { + bump_candidates.insert(*package_id, request.clone()); } } @@ -866,7 +902,13 @@ impl OnchainTxHandler #[cfg(anchors)] OnchainClaim::Event(claim_event) => { log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints()); - self.pending_claim_events.insert(*package_id, claim_event); + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter(). + filter(|entry| entry.0 == *package_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|event| event.0 != *package_id); + self.pending_claim_events.push((*package_id, claim_event)); }, } if let Some(request) = self.pending_claim_requests.get_mut(package_id) { @@ -930,7 +972,7 @@ impl OnchainTxHandler self.onchain_events_awaiting_threshold_conf.push(entry); } } - for ((_package_id, _), request) in bump_candidates.iter_mut() { + for ((_package_id, _), ref mut request) in bump_candidates.iter_mut() { if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) { request.set_timer(new_timer); request.set_feerate(new_feerate); @@ -942,7 +984,13 @@ impl OnchainTxHandler #[cfg(anchors)] OnchainClaim::Event(claim_event) => { log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints()); - self.pending_claim_events.insert(_package_id, claim_event); + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == *_package_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|event| event.0 != *_package_id); + self.pending_claim_events.push((*_package_id, claim_event)); }, } }
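For readers skimming the patch, the central change is that `pending_claim_events` stops being a `HashMap<PackageID, ClaimEvent>` and becomes a `Vec<(PackageID, ClaimEvent)>` that is kept de-duplicated by hand. The following standalone sketch is not LDK code; `PendingClaims` and the stripped-down `ClaimEvent` are invented for illustration. It only demonstrates the discipline the patch relies on: retain-then-push when a claim is bumped, retain when a claim confirms or becomes invalid, and swap-and-drain when events are handed to the consumer.

use core::mem::swap;

type PackageID = [u8; 32];

#[derive(Clone, Debug, PartialEq)]
enum ClaimEvent {
    BumpCommitment,
    BumpHTLC { target_feerate_sat_per_1000_weight: u32 },
}

#[derive(Default)]
struct PendingClaims {
    // Insertion-ordered; at most one entry per `PackageID`, always holding its latest event.
    events: Vec<(PackageID, ClaimEvent)>,
}

impl PendingClaims {
    // Bump path: drop any stale event for this id, then append the fresh one.
    fn upsert(&mut self, id: PackageID, event: ClaimEvent) {
        self.events.retain(|(existing_id, _)| *existing_id != id);
        self.events.push((id, event));
    }

    // Confirmation/maturation path: stop tracking the event for this id.
    fn remove(&mut self, id: &PackageID) {
        self.events.retain(|(existing_id, _)| existing_id != id);
    }

    // Consumption path, mirroring `get_and_clear_pending_claim_events`.
    fn get_and_clear(&mut self) -> Vec<ClaimEvent> {
        let mut events = Vec::new();
        swap(&mut events, &mut self.events);
        events.into_iter().map(|(_, event)| event).collect()
    }
}

fn main() {
    let mut pending = PendingClaims::default();
    pending.upsert([1; 32], ClaimEvent::BumpCommitment);
    pending.upsert([2; 32], ClaimEvent::BumpHTLC { target_feerate_sat_per_1000_weight: 253 });
    // A later block triggers an RBF bump for the first package: its old event is replaced.
    pending.upsert([1; 32], ClaimEvent::BumpHTLC { target_feerate_sat_per_1000_weight: 1000 });
    assert_eq!(pending.events.len(), 2);
    // The second package's claim confirms and matures, so its event is dropped.
    pending.remove(&[2; 32]);
    let events = pending.get_and_clear();
    assert_eq!(events, vec![ClaimEvent::BumpHTLC { target_feerate_sat_per_1000_weight: 1000 }]);
    assert!(pending.events.is_empty());
}

Keeping a Vec here preserves the order in which claims were generated when events are drained, which a HashMap's iteration order would not guarantee, at the cost of the manual de-duplication the debug assertions in the patch check for.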
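The other recurring change is that claims now carry an explicit locktime: `generate_claim` always returns a height timer, `ClaimEvent::BumpHTLC` gains a `tx_lock_time`, and packages whose locktime is still in the future are parked in `locktimed_packages`. Below is a rough sketch of that scheduling rule only. It is again not LDK code: the `PackageTemplate` stand-in and the `schedule` helper are invented, and `package_locktime` is simplified to "the CLTV height if there is one, else the current height".

use std::collections::BTreeMap;

struct PackageTemplate {
    // Simplified: the CLTV height this package must wait for, if any.
    absolute_locktime: Option<u32>,
}

impl PackageTemplate {
    // Stand-in for `PackageTemplate::package_locktime(cur_height)` in the patch.
    fn package_locktime(&self, cur_height: u32) -> u32 {
        self.absolute_locktime.unwrap_or(cur_height)
    }
}

// Mirrors the rule in the claim-request path: a package whose locktime is beyond the
// next block is deferred until a block at that height connects.
fn schedule(
    req: PackageTemplate, cur_height: u32,
    locktimed_packages: &mut BTreeMap<u32, Vec<PackageTemplate>>,
) -> Option<PackageTemplate> {
    let package_locktime = req.package_locktime(cur_height);
    if package_locktime > cur_height + 1 {
        locktimed_packages.entry(package_locktime).or_insert(Vec::new()).push(req);
        return None;
    }
    // Claimable now; the caller would build the claim and set its nLockTime to
    // `package_locktime` (cf. `tx_lock_time: PackedLockTime(...)` in the patch).
    Some(req)
}

fn main() {
    let mut locktimed_packages = BTreeMap::new();
    let cur_height = 100;
    // No CLTV constraint: claim immediately.
    assert!(schedule(PackageTemplate { absolute_locktime: None }, cur_height, &mut locktimed_packages).is_some());
    // HTLC-timeout style claim locked until height 144: defer it.
    assert!(schedule(PackageTemplate { absolute_locktime: Some(144) }, cur_height, &mut locktimed_packages).is_none());
    assert_eq!(locktimed_packages.get(&144).map(|v| v.len()), Some(1));
}

In the patch itself the computed locktime also feeds the new `tx_lock_time` field on `ClaimEvent::BumpHTLC`, so externally funded HTLC claims are built with the same locktime as directly broadcast ones.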