Add test_bump_txn_sanitize_tracking_maps
diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
index 72f11fe25f241406ae4d2a2bc12314ea01f293a8..d83171bd1ec981a96500a1356ea531034d4b359f 100644
--- a/lightning/src/ln/channelmonitor.rs
+++ b/lightning/src/ln/channelmonitor.rs
@@ -513,7 +513,7 @@ enum OnchainEvent {
 
 /// Higher-level cache structure used to re-generate bumped claim txn if needed
 #[derive(Clone, PartialEq)]
-struct ClaimTxBumpMaterial {
+pub struct ClaimTxBumpMaterial {
        // At every block tick, used to check if pending claiming tx is taking too
        // much time for confirmation and we need to bump it.
        height_timer: u32,
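
Note on the visibility bump above: rustc rejects a public field whose type is private (E0446, "private type in public interface"), so once pending_claim_requests is exposed to tests further down, ClaimTxBumpMaterial itself has to become pub. A minimal compiling sketch of the constraint, with hypothetical names:

    // Sketch only: `Monitor.pending` is public, so its value type must be
    // public too, mirroring why ClaimTxBumpMaterial gains `pub` here.
    mod monitor {
        use std::collections::HashMap;

        pub struct BumpMaterial { pub height_timer: u32 } // must be pub ...

        pub struct Monitor {
            pub pending: HashMap<u64, BumpMaterial>, // ... or this field is E0446
        }
    }

    fn main() {
        let m = monitor::Monitor { pending: std::collections::HashMap::new() };
        assert!(m.pending.is_empty());
    }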
@@ -621,6 +621,9 @@ pub struct ChannelMonitor {
        // Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction generated by
        // us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
        // Entry is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
+       #[cfg(test)] // Used in functional_tests to verify sanitization
+       pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
+       #[cfg(not(test))]
        pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
 
        // Used to link outpoints claimed in a connected block to a pending claim request.
@@ -629,6 +632,9 @@ pub struct ChannelMonitor {
        // is the txid of the initial claiming transaction and is immutable until the outpoint is
        // post-anti-reorg-delay solved; confirmation_block is used to erase the entry if the
        // block with the output gets disconnected.
+       #[cfg(test)] // Used in functional_tests to verify sanitization
+       pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
+       #[cfg(not(test))]
        claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
 
        // Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which
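
Both tracking maps above use the same cfg trick: the field is declared twice, once pub under #[cfg(test)] so functional tests can inspect it directly, and once private for every other build, with no runtime cost in either. A self-contained sketch of the pattern, names hypothetical:

    use std::collections::HashMap;

    pub struct Tracker {
        #[cfg(test)] // test builds get direct read access for assertions
        pub pending_claims: HashMap<u64, u32>,
        #[cfg(not(test))] // all other builds keep the field private
        pending_claims: HashMap<u64, u32>,
    }

    impl Tracker {
        pub fn new() -> Self {
            Tracker { pending_claims: HashMap::new() }
        }
    }

    #[cfg(test)]
    mod tests {
        #[test]
        fn maps_start_empty() {
            assert!(super::Tracker::new().pending_claims.is_empty());
        }
    }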
@@ -2331,7 +2337,7 @@ impl ChannelMonitor {
                let mut watch_outputs = Vec::new();
                let mut spendable_outputs = Vec::new();
                let mut htlc_updated = Vec::new();
-               let mut bump_candidates = Vec::new();
+               let mut bump_candidates = HashMap::new();
                for tx in txn_matched {
                        if tx.input.len() == 1 {
                                // Assuming our keys were not leaked (in which case we're screwed no matter what),
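
bump_candidates changes from a Vec to a HashMap keyed by the claim request's txid: a request picked up while scanning txn_matched and also due for its height_timer in the same block would previously be pushed twice and bumped twice, and is now queued only once. A rough dedup sketch with placeholder types:

    use std::collections::{hash_map, HashMap};

    fn main() {
        // &str stands in for the claim-request txid, u32 for the bump material.
        let mut bump_candidates: HashMap<&str, u32> = HashMap::new();

        // Queued while processing a confirmed spend of some of its outpoints.
        bump_candidates.insert("claim_txid", 1);

        // height_timer pass: fills the slot only if nothing is queued yet.
        if let hash_map::Entry::Vacant(entry) = bump_candidates.entry("claim_txid") {
            entry.insert(2);
        }

        assert_eq!(bump_candidates["claim_txid"], 1); // queued exactly once
    }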
@@ -2414,29 +2420,39 @@ impl ChannelMonitor {
                                                        }
                                                }
 
-                                               // If this is our transaction (or our counterparty spent all the outputs
-                                               // before we could anyway), wait for ANTI_REORG_DELAY and clean the RBF
-                                               // tracking map.
-                                               if set_equality {
-                                                       let new_event = OnchainEvent::Claim { claim_request: ancestor_claimable_txid.0.clone() };
-                                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-                                                               hash_map::Entry::Occupied(mut entry) => {
-                                                                       if !entry.get().contains(&new_event) {
-                                                                               entry.get_mut().push(new_event);
+                                               macro_rules! clean_claim_request_after_safety_delay {
+                                                       () => {
+                                                               let new_event = OnchainEvent::Claim { claim_request: ancestor_claimable_txid.0.clone() };
+                                                               match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                                                                       hash_map::Entry::Occupied(mut entry) => {
+                                                                               if !entry.get().contains(&new_event) {
+                                                                                       entry.get_mut().push(new_event);
+                                                                               }
+                                                                       },
+                                                                       hash_map::Entry::Vacant(entry) => {
+                                                                               entry.insert(vec![new_event]);
                                                                        }
-                                                               },
-                                                               hash_map::Entry::Vacant(entry) => {
-                                                                       entry.insert(vec![new_event]);
                                                                }
                                                        }
+                                               }
+
+                                               // If this is our transaction (or our counterparty spent all the outputs
+                                               // before we could anyway, with the same input order as us), wait for
+                                               // ANTI_REORG_DELAY and clean the RBF tracking map.
+                                               if set_equality {
+                                                       clean_claim_request_after_safety_delay!();
                                                } else { // If false, generate a new claim request with the updated outpoint set
                                                        for input in tx.input.iter() {
                                                                if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
                                                                        claimed_outputs_material.push((input.previous_output, input_material));
                                                                }
+                                                               // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
+                                                               if claim_material.per_input_material.is_empty() {
+                                                                       clean_claim_request_after_safety_delay!();
+                                                               }
                                                        }
                                                        //TODO: recompute soonest_timelock to avoid wasting a bit on fees
-                                                       bump_candidates.push((ancestor_claimable_txid.0.clone(), claim_material.clone()));
+                                                       bump_candidates.insert(ancestor_claimable_txid.0.clone(), claim_material.clone());
                                                }
                                                break; // No need to iterate further; the tx is either ours or theirs
                                        } else {
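
The new clean_claim_request_after_safety_delay!() macro factors out the cleanup scheduling that used to live only in the set_equality branch, so the new partial-claim case (per_input_material drained empty) can reuse it: an OnchainEvent::Claim is queued for height + ANTI_REORG_DELAY - 1, at most once per height bucket. A standalone sketch of that schedule-once pattern (the ANTI_REORG_DELAY value here is assumed for illustration):

    use std::collections::{hash_map, HashMap};

    const ANTI_REORG_DELAY: u32 = 6; // assumed; see the crate's own constant

    fn main() {
        let mut events_waiting: HashMap<u32, Vec<&str>> = HashMap::new();
        let height: u32 = 100;
        let new_event = "claim_request_txid";

        // Run twice to show the event lands in its bucket exactly once.
        for _ in 0..2 {
            match events_waiting.entry(height + ANTI_REORG_DELAY - 1) {
                hash_map::Entry::Occupied(mut entry) => {
                    if !entry.get().contains(&new_event) {
                        entry.get_mut().push(new_event);
                    }
                },
                hash_map::Entry::Vacant(entry) => {
                    entry.insert(vec![new_event]);
                },
            }
        }

        assert_eq!(events_waiting[&105], vec!["claim_request_txid"]);
    }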
@@ -2492,8 +2508,13 @@ impl ChannelMonitor {
                        for ev in events {
                                match ev {
                                        OnchainEvent::Claim { claim_request } => {
-                                               // We may remove a whole set of claim outpoints here, as these one may have been aggregated in a single tx and claimed so atomically
-                                               self.pending_claim_requests.remove(&claim_request);
+                                               // We may remove a whole set of claim outpoints here, as they may have
+                                               // been aggregated in a single tx and thus claimed atomically
+                                               if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
+                                                       for outpoint in bump_material.per_input_material.keys() {
+                                                               self.claimable_outpoints.remove(&outpoint);
+                                                       }
+                                               }
                                        },
                                        OnchainEvent::HTLCUpdate { htlc_update } => {
                                                log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
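
This hunk is the sanitization the new test targets: maturing a Claim event used to drop only the pending_claim_requests entry, leaving its outpoints stranded in claimable_outpoints forever. Removing the request now also removes every outpoint it tracked. A simplified sketch with stand-in types:

    use std::collections::HashMap;

    fn main() {
        // Stand-ins: &str for the claim txid, (u32, u32) for an outpoint.
        let mut pending_claim_requests: HashMap<&str, Vec<(u32, u32)>> = HashMap::new();
        let mut claimable_outpoints: HashMap<(u32, u32), &str> = HashMap::new();

        pending_claim_requests.insert("claim_txid", vec![(1, 0), (2, 1)]);
        claimable_outpoints.insert((1, 0), "claim_txid");
        claimable_outpoints.insert((2, 1), "claim_txid");

        // The claim matured ANTI_REORG_DELAY blocks ago: scrub both maps.
        if let Some(outpoints) = pending_claim_requests.remove("claim_txid") {
            for outpoint in outpoints.iter() {
                claimable_outpoints.remove(outpoint);
            }
        }

        assert!(pending_claim_requests.is_empty());
        assert!(claimable_outpoints.is_empty());
    }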
@@ -2507,17 +2528,19 @@ impl ChannelMonitor {
                }
                for (ancestor_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
                        if cached_claim_datas.height_timer == height {
-                               bump_candidates.push((ancestor_claim_txid.clone(), cached_claim_datas.clone()));
+                               if let hash_map::Entry::Vacant(entry) = bump_candidates.entry(ancestor_claim_txid.clone()) {
+                                       entry.insert(cached_claim_datas.clone());
+                               }
                        }
                }
-               for &mut (_, ref mut cached_claim_datas) in bump_candidates.iter_mut() {
+               for ref mut cached_claim_datas in bump_candidates.values_mut() {
                        if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &cached_claim_datas, fee_estimator) {
                                cached_claim_datas.height_timer = new_timer;
                                cached_claim_datas.feerate_previous = new_feerate;
                                broadcaster.broadcast_transaction(&bump_tx);
                        }
                }
-               for (ancestor_claim_txid, cached_claim_datas) in bump_candidates.drain(..) {
+               for (ancestor_claim_txid, cached_claim_datas) in bump_candidates.drain() {
                        self.pending_claim_requests.insert(ancestor_claim_txid, cached_claim_datas);
                }
                self.last_block_hash = block_hash.clone();
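
The final hunks adapt the bump pass to the new map: candidates whose height_timer fired are merged in through a Vacant entry (skipping any request already queued by the spend scanning above), each candidate is re-fee-estimated and broadcast, and the map is drained back into pending_claim_requests so the refreshed timer and feerate are what later blocks observe. A simplified sketch of that write-back flow, with the fee-bumping stubbed out:

    use std::collections::HashMap;

    fn main() {
        // (height_timer, feerate_previous) stands in for ClaimTxBumpMaterial.
        let mut pending_claim_requests: HashMap<&str, (u32, u64)> = HashMap::new();
        let mut bump_candidates: HashMap<&str, (u32, u64)> = HashMap::new();
        bump_candidates.insert("claim_txid", (100, 250));

        for cached in bump_candidates.values_mut() {
            // bump_claim_tx would compute these and return a transaction for
            // broadcaster.broadcast_transaction; both are stubbed here.
            cached.0 = 103; // new_timer
            cached.1 = 500; // new_feerate
        }

        // Write the refreshed material back into the authoritative map.
        for (txid, cached) in bump_candidates.drain() {
            pending_claim_requests.insert(txid, cached);
        }

        assert_eq!(pending_claim_requests["claim_txid"], (103, 500));
    }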