Flatten Vec passed from channelmonitor to onchaintx block_connected
author: Matt Corallo <git@bluematt.me>
Wed, 4 Mar 2020 22:27:03 +0000 (17:27 -0500)
committer: Matt Corallo <git@bluematt.me>
Wed, 4 Mar 2020 22:57:22 +0000 (17:57 -0500)
Instead of passing a Vec of Vecs, drop the elements into a single Vec
as we go in ChannelMonitor, hopefully avoiding a bit of memory
fragmentation and improving readability.

lightning/src/ln/channelmonitor.rs
lightning/src/ln/onchaintx.rs

index ea9e7851e647afc385b3d789c300e18d72f8bace..205f4b011ac0fb32d62304f5bf1e3ceadbbf2ca1 100644 (file)
@@ -2010,7 +2010,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                                                watch_outputs.push(new_outputs);
                                                        }
                                                }
-                                               claimable_outpoints.push(new_outpoints);
+                                               claimable_outpoints.append(&mut new_outpoints);
                                        }
                                        if !funding_txo.is_none() && claimable_outpoints.is_empty() {
                                                if let Some(spendable_output) = self.check_spend_closing_transaction(&tx) {
@@ -2020,7 +2020,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                } else {
                                        if let Some(&(commitment_number, _)) = self.remote_commitment_txn_on_chain.get(&prevout.txid) {
                                                let mut new_outpoints = self.check_spend_remote_htlc(&tx, commitment_number, height);
-                                               claimable_outpoints.push(new_outpoints);
+                                               claimable_outpoints.append(&mut new_outpoints);
                                        }
                                }
                        }
index 1ec68f5eb4d5d1642c41916565fec7d9bc5a3fa1..c731bc079b657131df2a5fe7994ca6dc967d3f1f 100644 (file)
@@ -478,7 +478,7 @@ impl OnchainTxHandler {
                Some((new_timer, new_feerate, bumped_tx))
        }
 
-       pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<Vec<ClaimRequest>>, height: u32, broadcaster: B, fee_estimator: F) -> Vec<SpendableOutputDescriptor>
+       pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F) -> Vec<SpendableOutputDescriptor>
                where B::Target: BroadcasterInterface,
                      F::Target: FeeEstimator
        {
@@ -489,20 +489,18 @@ impl OnchainTxHandler {
 
                // Try to aggregate outputs if they're 1) belong to same parent tx, 2) their
                // timelock expiration isn't imminent (<= CLTV_SHARED_CLAIM_BUFFER).
-               for siblings_outpoints in claimable_outpoints {
-                       for req in siblings_outpoints {
-                               // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
-                               if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
-                                       log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
-                                       if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
-                                               let mut single_input = HashMap::new();
-                                               single_input.insert(req.outpoint, req.witness_data);
-                                               new_claims.push((req.absolute_timelock, single_input));
-                                       } else {
-                                               aggregated_claim.insert(req.outpoint, req.witness_data);
-                                               if req.absolute_timelock < aggregated_soonest {
-                                                       aggregated_soonest = req.absolute_timelock;
-                                               }
+               for req in claimable_outpoints {
+                       // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
+                       if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
+                               log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
+                               if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
+                                       let mut single_input = HashMap::new();
+                                       single_input.insert(req.outpoint, req.witness_data);
+                                       new_claims.push((req.absolute_timelock, single_input));
+                               } else {
+                                       aggregated_claim.insert(req.outpoint, req.witness_data);
+                                       if req.absolute_timelock < aggregated_soonest {
+                                               aggregated_soonest = req.absolute_timelock;
                                        }
                                }
                        }