From 7b9c28a02113fe1f36d2b4d332f52fa898f8ba2f Mon Sep 17 00:00:00 2001
From: Wilmer Paulino
Date: Tue, 14 Feb 2023 15:48:44 -0800
Subject: [PATCH] Rename keys for OnchainTxHandler::claimable_outpoints map

---
 lightning/src/chain/onchaintx.rs | 44 ++++++++++++++++----------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index 6cb59b590..faf3fe12f 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -251,12 +251,12 @@ pub struct OnchainTxHandler {
 	#[cfg(anchors)]
 	pending_claim_events: HashMap,
 
-	// Used to link outpoints claimed in a connected block to a pending claim request.
-	// Key is outpoint than monitor parsing has detected we have keys/scripts to claim
-	// Value is (pending claim request identifier, confirmation_block), identifier
-	// is txid of the initial claiming transaction and is immutable until outpoint is
-	// post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if
-	// block with output gets disconnected.
+	// Used to link outpoints claimed in a connected block to a pending claim request. The keys
+	// represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to
+	// claim. The values track the pending claim request identifier and the initial confirmation
+	// block height, and are immutable until the outpoint has enough confirmations to meet our
+	// [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if
+	// the block gets disconnected.
 	#[cfg(test)] // Used in functional_test to verify sanitization
 	pub claimable_outpoints: HashMap,
 	#[cfg(not(test))]
@@ -494,12 +494,12 @@ impl OnchainTxHandler
 		// transaction is reorged out.
 		let mut all_inputs_have_confirmed_spend = true;
 		for outpoint in request_outpoints.iter() {
-			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(*outpoint) {
+			if let Some((request_package_id, _)) = self.claimable_outpoints.get(*outpoint) {
 				// We check for outpoint spends within claims individually rather than as a set
 				// since requests can have outpoints split off.
 				if !self.onchain_events_awaiting_threshold_conf.iter()
 					.any(|event_entry| if let OnchainEvent::Claim { package_id } = event_entry.event {
-						first_claim_txid_height.0 == package_id
+						*request_package_id == package_id
 					} else {
 						// The onchain event is not a claim, keep seeking until we find one.
 						false
@@ -744,9 +744,9 @@ impl OnchainTxHandler
 		// Scan all input to verify is one of the outpoint spent is of interest for us
 		let mut claimed_outputs_material = Vec::new();
 		for inp in &tx.input {
-			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
+			if let Some((package_id, _)) = self.claimable_outpoints.get(&inp.previous_output) {
 				// If outpoint has claim request pending on it...
-				if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
+				if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
 					//... we need to verify equality between transaction outpoints and claim request
 					// outpoints to know if transaction is the original claim or a bumped one issued
 					// by us.
@@ -766,7 +766,7 @@ impl OnchainTxHandler
 						txid: tx.txid(),
 						height: conf_height,
 						block_hash: Some(conf_hash),
-						event: OnchainEvent::Claim { package_id: first_claim_txid_height.0 }
+						event: OnchainEvent::Claim { package_id: *package_id }
 					};
 					if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
 						self.onchain_events_awaiting_threshold_conf.push(entry);
@@ -793,7 +793,7 @@ impl OnchainTxHandler
 					}
 					//TODO: recompute soonest_timelock to avoid wasting a bit on fees
 					if at_least_one_drop {
-						bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
+						bump_candidates.insert(*package_id, request.clone());
 					}
 				}
 				break; //No need to iterate further, either tx is our or their
@@ -846,17 +846,17 @@ impl OnchainTxHandler
 		}
 
 		// Check if any pending claim request must be rescheduled
-		for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
+		for (package_id, request) in self.pending_claim_requests.iter() {
 			if let Some(h) = request.timer() {
 				if cur_height >= h {
-					bump_candidates.insert(*first_claim_txid, (*request).clone());
+					bump_candidates.insert(*package_id, request.clone());
 				}
 			}
 		}
 
 		// Build, bump and rebroadcast tx accordingly
 		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
-		for (first_claim_txid, request) in bump_candidates.iter() {
+		for (package_id, request) in bump_candidates.iter() {
 			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
 				match bump_claim {
 					OnchainClaim::Tx(bump_tx) => {
@@ -866,10 +866,10 @@ impl OnchainTxHandler
 					#[cfg(anchors)]
 					OnchainClaim::Event(claim_event) => {
 						log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
-						self.pending_claim_events.insert(*first_claim_txid, claim_event);
+						self.pending_claim_events.insert(*package_id, claim_event);
 					},
 				}
-				if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
+				if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
 					request.set_timer(new_timer);
 					request.set_feerate(new_feerate);
 				}
@@ -915,12 +915,12 @@ impl OnchainTxHandler
 				//- resurect outpoint back in its claimable set and regenerate tx
 				match entry.event {
 					OnchainEvent::ContentiousOutpoint { package } => {
-						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(package.outpoints()[0]) {
-							if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+						if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
+							if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
 								request.merge_package(package);
 								// Using a HashMap guarantee us than if we have multiple outpoints getting
 								// resurrected only one bump claim tx is going to be broadcast
-								bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
+								bump_candidates.insert(pending_claim.clone(), request.clone());
 							}
 						}
 					},
@@ -930,7 +930,7 @@ impl OnchainTxHandler
 				self.onchain_events_awaiting_threshold_conf.push(entry);
 			}
 		}
-		for (_first_claim_txid_height, request) in bump_candidates.iter_mut() {
+		for ((_package_id, _), request) in bump_candidates.iter_mut() {
 			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
 				request.set_timer(new_timer);
 				request.set_feerate(new_feerate);
@@ -942,7 +942,7 @@ impl OnchainTxHandler
 					#[cfg(anchors)]
 					OnchainClaim::Event(claim_event) => {
 						log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
-						self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event);
+						self.pending_claim_events.insert(_package_id, claim_event);
 					},
 				}
 			}
-- 
2.39.5
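
A minimal, standalone Rust sketch (not part of the patch) of the map shape the renamed keys refer to: keys are claimable outpoints, values are (pending claim request identifier, initial confirmation block height). It assumes a `[u8; 32]` identifier and uses a simplified `OutPoint` stand-in so it compiles without the `bitcoin` crate; the real map is keyed by `bitcoin::OutPoint`.

// Illustration only: simplified stand-ins, not the actual rust-lightning types.
use std::collections::HashMap;

/// Assumed 32-byte identifier for a pending claim request (first element of the map value).
type PackageID = [u8; 32];

/// Simplified stand-in for `bitcoin::OutPoint`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct OutPoint { txid: [u8; 32], vout: u32 }

fn main() {
	// Keys: outpoints we have the keys/scripts to claim.
	// Values: (pending claim request identifier, initial confirmation block height).
	let mut claimable_outpoints: HashMap<OutPoint, (PackageID, u32)> = HashMap::new();

	let outpoint = OutPoint { txid: [1; 32], vout: 0 };
	let package_id: PackageID = [42; 32];
	claimable_outpoints.insert(outpoint, (package_id, 100_000));

	// After the rename, lookups destructure the value instead of indexing the tuple with `.0`:
	if let Some((package_id, _conf_height)) = claimable_outpoints.get(&outpoint) {
		assert_eq!(*package_id, [42; 32]);
	}
}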