X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmonitor.rs;h=ea84b984ed7bb122e540c99fb102c7d62498bba9;hb=8defcf1107a7f564ebce0048dd98ae72f2026db6;hp=8c1670ca40ea8ca30cfd9073bd4f1bbf4861cbdf;hpb=6a775ea1e96e397e45092199d328524ae25c2b61;p=rust-lightning

diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
index 8c1670ca..ea84b984 100644
--- a/lightning/src/ln/channelmonitor.rs
+++ b/lightning/src/ln/channelmonitor.rs
@@ -41,7 +41,7 @@ use util::logger::Logger;
 use util::ser::{ReadableArgs, Readable, Writer, Writeable, WriterWriteAdaptor, U48};
 use util::{byte_utils, events};
 
-use std::collections::{HashMap, hash_map};
+use std::collections::{HashMap, hash_map, HashSet};
 use std::sync::{Arc,Mutex};
 use std::{hash,cmp, mem};
 
@@ -513,7 +513,7 @@ enum OnchainEvent {
 
 /// Higher-level cache structure needed to re-generate bumped claim txn if needed
 #[derive(Clone, PartialEq)]
-struct ClaimTxBumpMaterial {
+pub struct ClaimTxBumpMaterial {
 	// At every block tick, used to check if pending claiming tx is taking too
 	// much time for confirmation and we need to bump it.
 	height_timer: u32,
@@ -621,6 +621,9 @@ pub struct ChannelMonitor {
 	// Key is identifier of the pending claim request, i.e the txid of the initial claiming transaction generated by
 	// us and is immutable until all outpoint of the claimable set are post-anti-reorg-delay solved.
 	// Entry is cache of elements need to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
+	#[cfg(test)] // Used in functional_test to verify sanitization
+	pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
+	#[cfg(not(test))]
 	pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
 
 	// Used to link outpoints claimed in a connected block to a pending claim request.
@@ -629,6 +632,9 @@ pub struct ChannelMonitor {
 	// is txid of the initial claiming transaction and is immutable until outpoint is
 	// post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if
 	// block with output gets disconnected.
+	#[cfg(test)] // Used in functional_test to verify sanitization
+	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
+	#[cfg(not(test))]
 	claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
 
 	// Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which
@@ -2331,7 +2337,7 @@ impl ChannelMonitor {
 		let mut watch_outputs = Vec::new();
 		let mut spendable_outputs = Vec::new();
 		let mut htlc_updated = Vec::new();
-		let mut bump_candidates = HashMap::new();
+		let mut bump_candidates = HashSet::new();
 		for tx in txn_matched {
 			if tx.input.len() == 1 {
 				// Assuming our keys were not leaked (in which case we're screwed no matter what),
@@ -2397,9 +2403,9 @@ impl ChannelMonitor {
 			// Scan all input to verify is one of the outpoint spent is of interest for us
 			let mut claimed_outputs_material = Vec::new();
 			for inp in &tx.input {
-				if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&inp.previous_output) {
+				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
					// If outpoint has claim request pending on it...
-					if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+					if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
 						//... we need to verify equality between transaction outpoints and claim request
 						// outpoints to know if transaction is the original claim or a bumped one issued
 						// by us.
@@ -2414,29 +2420,39 @@ impl ChannelMonitor {
 							}
 						}
 
-						// If this is our transaction (or our counterparty spent all the outputs
-						// before we could anyway), wait for ANTI_REORG_DELAY and clean the RBF
-						// tracking map.
-						if set_equality {
-							let new_event = OnchainEvent::Claim { claim_request: ancestor_claimable_txid.0.clone() };
-							match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-								hash_map::Entry::Occupied(mut entry) => {
-									if !entry.get().contains(&new_event) {
-										entry.get_mut().push(new_event);
+						macro_rules! clean_claim_request_after_safety_delay {
+							() => {
+								let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
+								match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+									hash_map::Entry::Occupied(mut entry) => {
+										if !entry.get().contains(&new_event) {
+											entry.get_mut().push(new_event);
+										}
+									},
+									hash_map::Entry::Vacant(entry) => {
+										entry.insert(vec![new_event]);
 									}
-								},
-								hash_map::Entry::Vacant(entry) => {
-									entry.insert(vec![new_event]);
 								}
 							}
+						}
+
+						// If this is our transaction (or our counterparty spent all the outputs
+						// before we could anyway with same inputs order than us), wait for
+						// ANTI_REORG_DELAY and clean the RBF tracking map.
+						if set_equality {
+							clean_claim_request_after_safety_delay!();
 						} else { // If false, generate new claim request with update outpoint set
 							for input in tx.input.iter() {
 								if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
 									claimed_outputs_material.push((input.previous_output, input_material));
 								}
+								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
+								if claim_material.per_input_material.is_empty() {
+									clean_claim_request_after_safety_delay!();
+								}
 							}
 							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
-							bump_candidates.insert(ancestor_claimable_txid.0.clone(), claim_material.clone());
+							bump_candidates.insert(first_claim_txid_height.0.clone());
 						}
 						break; //No need to iterate further, either tx is our or their
 					} else {
@@ -2510,23 +2526,26 @@ impl ChannelMonitor {
 				}
 			}
 		}
-		for (ancestor_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
+		for (first_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
 			if cached_claim_datas.height_timer == height {
-				if let hash_map::Entry::Vacant(entry) = bump_candidates.entry(ancestor_claim_txid.clone()) {
-					entry.insert(cached_claim_datas.clone());
-				}
+				bump_candidates.insert(first_claim_txid.clone());
+			}
+		}
+		for first_claim_txid in bump_candidates.iter() {
+			if let Some((new_timer, new_feerate)) = {
+				if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
+					if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, fee_estimator) {
+						broadcaster.broadcast_transaction(&bump_tx);
+						Some((new_timer, new_feerate))
+					} else { None }
+				} else { unreachable!(); }
+			} {
+				if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
+					claim_material.height_timer = new_timer;
+					claim_material.feerate_previous = new_feerate;
+				} else { unreachable!(); }
 			}
 		}
-		for ref mut cached_claim_datas in bump_candidates.values_mut() {
-			if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &cached_claim_datas, fee_estimator) {
-				cached_claim_datas.height_timer = new_timer;
-				cached_claim_datas.feerate_previous = new_feerate;
-				broadcaster.broadcast_transaction(&bump_tx);
-			}
-		}
-		for (ancestor_claim_txid, cached_claim_datas) in bump_candidates.drain() {
-			self.pending_claim_requests.insert(ancestor_claim_txid, cached_claim_datas);
-		}
 		self.last_block_hash = block_hash.clone();
 		(watch_outputs, spendable_outputs, htlc_updated)
 	}
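Note on the two field hunks: pending_claim_requests and claimable_outpoints become pub only when the crate is built for tests, so the in-crate functional tests can assert that the claim-tracking maps are properly sanitized, while normal builds keep the fields private. A minimal sketch of that #[cfg(test)] visibility toggle, with placeholder key/value types instead of the real txid/outpoint types:

use std::collections::HashMap;

pub struct ClaimTracker {
    // Public only when compiled for tests, so in-crate functional tests can
    // inspect the map directly; private in every other build.
    // u64/u32 are placeholder key/value types.
    #[cfg(test)]
    pub pending_claim_requests: HashMap<u64, u32>,
    #[cfg(not(test))]
    pending_claim_requests: HashMap<u64, u32>,
}

impl ClaimTracker {
    pub fn new() -> Self {
        ClaimTracker { pending_claim_requests: HashMap::new() }
    }

    // Non-test code keeps going through methods like this one.
    pub fn pending_count(&self) -> usize {
        self.pending_claim_requests.len()
    }
}

fn main() {
    let tracker = ClaimTracker::new();
    assert_eq!(tracker.pending_count(), 0);
}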
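Note on the rewritten bump loop (last hunk): for each entry in bump_candidates the new code first takes an immutable borrow of pending_claim_requests to build and broadcast the bumped transaction, then drops that borrow and takes a mutable one to record the new height_timer and feerate_previous. Splitting the work this way avoids holding a mutable borrow of the map while bump_claim_tx() and broadcast_transaction() are still reading from it. Below is a minimal standalone sketch of that two-phase borrow pattern only; ClaimMaterial, the u64 keys and the fee-doubling logic are placeholders, not the real ChannelMonitor types or bump_claim_tx() behaviour.

use std::collections::{HashMap, HashSet};

// Placeholder stand-in for the diff's ClaimTxBumpMaterial cache entry.
struct ClaimMaterial {
    height_timer: u32,
    feerate_previous: u64,
}

// Two-phase update: read the map immutably to decide on the bump, then
// re-borrow it mutably to record the result, mirroring the new loop over
// bump_candidates. Keys are u64 placeholders instead of txids.
fn bump_pending_claims(pending: &mut HashMap<u64, ClaimMaterial>, candidates: &HashSet<u64>, height: u32) {
    for id in candidates {
        // Phase 1: immutable borrow only; placeholder fee logic stands in for
        // building and broadcasting a bumped transaction.
        let bumped = pending.get(id).map(|m| (height + 3, m.feerate_previous * 2));
        // Phase 2: the immutable borrow has ended, so a mutable borrow of the
        // same map is allowed and the cached material can be updated.
        if let Some((new_timer, new_feerate)) = bumped {
            if let Some(m) = pending.get_mut(id) {
                m.height_timer = new_timer;
                m.feerate_previous = new_feerate;
            }
        }
    }
}

fn main() {
    let mut pending = HashMap::new();
    pending.insert(1u64, ClaimMaterial { height_timer: 100, feerate_previous: 250 });
    let candidates: HashSet<u64> = std::iter::once(1u64).collect();
    bump_pending_claims(&mut pending, &candidates, 105);
    assert_eq!(pending[&1].height_timer, 108);
    assert_eq!(pending[&1].feerate_previous, 500);
}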