X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmonitor.rs;h=ea84b984ed7bb122e540c99fb102c7d62498bba9;hb=8defcf1107a7f564ebce0048dd98ae72f2026db6;hp=e17111256ea016ae6df06abaa22fcb5b846f8043;hpb=d421816e8420a9402461bbc96588843a1519cdff;p=rust-lightning

diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
index e1711125..ea84b984 100644
--- a/lightning/src/ln/channelmonitor.rs
+++ b/lightning/src/ln/channelmonitor.rs
@@ -41,7 +41,7 @@ use util::logger::Logger;
 use util::ser::{ReadableArgs, Readable, Writer, Writeable, WriterWriteAdaptor, U48};
 use util::{byte_utils, events};
 
-use std::collections::{HashMap, hash_map};
+use std::collections::{HashMap, hash_map, HashSet};
 use std::sync::{Arc,Mutex};
 use std::{hash,cmp, mem};
 
@@ -513,7 +513,7 @@ enum OnchainEvent {
 
 /// Higher-level cache structure needed to re-generate bumped claim txn if needed
 #[derive(Clone, PartialEq)]
-struct ClaimTxBumpMaterial {
+pub struct ClaimTxBumpMaterial {
 	// At every block tick, used to check if pending claiming tx is taking too
 	// much time for confirmation and we need to bump it.
 	height_timer: u32,
@@ -621,6 +621,9 @@ pub struct ChannelMonitor {
 	// Key is identifier of the pending claim request, i.e the txid of the initial claiming transaction generated by
 	// us and is immutable until all outpoint of the claimable set are post-anti-reorg-delay solved.
 	// Entry is cache of elements need to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
+	#[cfg(test)] // Used in functional_test to verify sanitization
+	pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
+	#[cfg(not(test))]
 	pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
 
 	// Used to link outpoints claimed in a connected block to a pending claim request.
@@ -629,6 +632,9 @@ pub struct ChannelMonitor {
 	// is txid of the initial claiming transaction and is immutable until outpoint is
 	// post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if
 	// block with output gets disconnected.
+	#[cfg(test)] // Used in functional_test to verify sanitization
+	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
+	#[cfg(not(test))]
 	claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
 
 	// Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which
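
The two hunks above expose pending_claim_requests and claimable_outpoints to the test build only, so functional tests can inspect map sanitization without widening the public API. A minimal, self-contained sketch of this #[cfg(test)] field-visibility pattern, with hypothetical names not taken from the patch:

	// Sketch only: the field is `pub` when compiled under `cfg(test)` so tests
	// can read it directly, and private otherwise. Both declarations carry the
	// same type, so non-test code compiles identically either way.
	use std::collections::HashMap;

	pub struct Tracker {
		#[cfg(test)] // visible to tests for direct inspection
		pub pending: HashMap<u64, String>,
		#[cfg(not(test))]
		pending: HashMap<u64, String>,
	}

	impl Tracker {
		pub fn new() -> Self {
			Tracker { pending: HashMap::new() }
		}
	}
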
@@ -2327,10 +2333,11 @@ impl ChannelMonitor {
 	}
 
 	fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>, Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>) {
+		log_trace!(self, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
 		let mut watch_outputs = Vec::new();
 		let mut spendable_outputs = Vec::new();
 		let mut htlc_updated = Vec::new();
-		let mut bump_candidates = Vec::new();
+		let mut bump_candidates = HashSet::new();
 		for tx in txn_matched {
 			if tx.input.len() == 1 {
 				// Assuming our keys were not leaked (in which case we're screwed no matter what),
@@ -2394,37 +2401,58 @@ impl ChannelMonitor {
 			}
 
 			// Scan all input to verify is one of the outpoint spent is of interest for us
-			let mut claimed_outpoints = Vec::new();
-			let mut claimed_input_material = Vec::new();
 			for inp in &tx.input {
-				if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&inp.previous_output) {
+				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
 					// If outpoint has claim request pending on it...
-					if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+					if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
 						//... we need to verify equality between transaction outpoints and claim request
 						// outpoints to know if transaction is the original claim or a bumped one issued
 						// by us.
-						for claim_inp in claim_material.per_input_material.keys() {
-							if *claim_inp == inp.previous_output {
-								claimed_outpoints.push(inp.previous_output.clone());
+						let mut set_equality = true;
+						if claim_material.per_input_material.len() != tx.input.len() {
+							set_equality = false;
+						} else {
+							for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
+								if *claim_inp != tx_inp.previous_output {
+									set_equality = false;
+								}
 							}
 						}
-						if claimed_outpoints.len() == 0 && claim_material.per_input_material.len() == tx.input.len() { // If true, register claim request to be removed after reaching a block security height
-							match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-								hash_map::Entry::Occupied(_) => {},
-								hash_map::Entry::Vacant(entry) => {
-									entry.insert(vec![OnchainEvent::Claim { claim_request: ancestor_claimable_txid.0.clone()}]);
+
+						macro_rules! clean_claim_request_after_safety_delay {
+							() => {
+								let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
+								match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+									hash_map::Entry::Occupied(mut entry) => {
+										if !entry.get().contains(&new_event) {
+											entry.get_mut().push(new_event);
+										}
+									},
+									hash_map::Entry::Vacant(entry) => {
+										entry.insert(vec![new_event]);
+									}
 								}
 							}
+						}
+
+						// If this is our transaction (or our counterparty spent all the outputs
+						// before we could anyway, with the same input order as ours), wait for
+						// ANTI_REORG_DELAY and clean the RBF tracking map.
+						if set_equality {
+							clean_claim_request_after_safety_delay!();
 						} else { // If false, generate new claim request with update outpoint set
-							for already_claimed in claimed_outpoints.iter() {
-								if let Some(input_material) = claim_material.per_input_material.remove(&already_claimed) {
-									claimed_input_material.push(input_material);
+							for input in tx.input.iter() {
+								if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
+									claimed_outputs_material.push((input.previous_output, input_material));
+								}
+								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
+								if claim_material.per_input_material.is_empty() {
+									clean_claim_request_after_safety_delay!();
 								}
 							}
-							// Avoid bump engine using inaccurate feerate due to new transaction size
-							claim_material.feerate_previous = 0;
 							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
-							bump_candidates.push((ancestor_claimable_txid.0.clone(), claim_material.clone()));
+							bump_candidates.insert(first_claim_txid_height.0.clone());
 						}
 						break; //No need to iterate further, either tx is our or their
 					} else {
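
The set_equality test above decides whether a confirmed transaction is the claim we broadcast (or an exact conflict spending the same outpoints in the same order), in which case the whole request is scheduled for cleanup, or a partial spend that requires regenerating the claim from the remaining outpoints. A condensed, standalone rendering of that comparison, with plain integer pairs standing in for bitcoin OutPoints:

	// Sketch only: true when tx_inputs spends exactly the tracked outpoints,
	// in the same order; any length or order mismatch means "bump the rest".
	fn spends_exactly(claim_outpoints: &[(u32, u32)], tx_inputs: &[(u32, u32)]) -> bool {
		claim_outpoints.len() == tx_inputs.len()
			&& claim_outpoints.iter().zip(tx_inputs.iter()).all(|(a, b)| a == b)
	}

	fn main() {
		let claim = [(1, 0), (2, 1)];
		assert!(spends_exactly(&claim, &[(1, 0), (2, 1)])); // our claim confirmed
		assert!(!spends_exactly(&claim, &[(1, 0)])); // partial spend by a conflict
	}
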
@@ -2432,11 +2460,16 @@ impl ChannelMonitor {
 					}
 				}
 			}
-			for (outpoint, input_material) in claimed_outpoints.iter().zip(claimed_input_material.drain(..)) {
+			for (outpoint, input_material) in claimed_outputs_material.drain(..) {
+				let new_event = OnchainEvent::ContentiousOutpoint { outpoint, input_material };
 				match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-					hash_map::Entry::Occupied(_) => {},
+					hash_map::Entry::Occupied(mut entry) => {
+						if !entry.get().contains(&new_event) {
+							entry.get_mut().push(new_event);
+						}
+					},
 					hash_map::Entry::Vacant(entry) => {
-						entry.insert(vec![OnchainEvent::ContentiousOutpoint { outpoint: *outpoint, input_material: input_material }]);
+						entry.insert(vec![new_event]);
 					}
 				}
 			}
@@ -2475,8 +2508,13 @@ impl ChannelMonitor {
 			for ev in events {
 				match ev {
 					OnchainEvent::Claim { claim_request } => {
-						// We may remove a whole set of claim outpoints here, as these one may have been aggregated in a single tx and claimed so atomically
-						self.pending_claim_requests.remove(&claim_request);
+						// We may remove a whole set of claim outpoints here, as they may have
+						// been aggregated in a single tx and claimed atomically
+						if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
+							for outpoint in bump_material.per_input_material.keys() {
+								self.claimable_outpoints.remove(&outpoint);
+							}
+						}
 					},
 					OnchainEvent::HTLCUpdate { htlc_update } => {
 						log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
@@ -2488,21 +2526,26 @@ impl ChannelMonitor {
 				}
 			}
 		}
-		for (ancestor_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
+		for (first_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
 			if cached_claim_datas.height_timer == height {
-				bump_candidates.push((ancestor_claim_txid.clone(), cached_claim_datas.clone()));
+				bump_candidates.insert(first_claim_txid.clone());
 			}
 		}
-		for &mut (_, ref mut cached_claim_datas) in bump_candidates.iter_mut() {
-			if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &cached_claim_datas, fee_estimator) {
-				cached_claim_datas.height_timer = new_timer;
-				cached_claim_datas.feerate_previous = new_feerate;
-				broadcaster.broadcast_transaction(&bump_tx);
+		for first_claim_txid in bump_candidates.iter() {
+			if let Some((new_timer, new_feerate)) = {
+				if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
+					if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, fee_estimator) {
+						broadcaster.broadcast_transaction(&bump_tx);
+						Some((new_timer, new_feerate))
+					} else { None }
+				} else { unreachable!(); }
+			} {
+				if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
+					claim_material.height_timer = new_timer;
+					claim_material.feerate_previous = new_feerate;
+				} else { unreachable!(); }
 			}
 		}
-		for (ancestor_claim_txid, cached_claim_datas) in bump_candidates.drain(..) {
-			self.pending_claim_requests.insert(ancestor_claim_txid, cached_claim_datas);
-		}
 		self.last_block_hash = block_hash.clone();
 		(watch_outputs, spendable_outputs, htlc_updated)
 	}
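
The nested `if let Some(..) = { .. }` in the rewritten bump loop above is a two-phase lookup: bump_claim_tx takes `&self` while the map entry must eventually be mutated, so the patch reads the claim material with get(), lets that borrow end, then re-fetches with get_mut() to record the new timer and feerate. A standalone sketch of the same borrow-splitting pattern, where a hypothetical doubling stands in for the fee-bump computation:

	use std::collections::HashMap;

	fn main() {
		let mut requests: HashMap<&str, u64> = HashMap::new();
		requests.insert("first_claim_txid", 250); // hypothetical feerate_previous

		// Phase 1: immutable read; the borrow of `requests` ends with this block.
		let new_feerate = if let Some(feerate) = requests.get("first_claim_txid") {
			Some(feerate * 2) // stand-in for the real fee-bump computation
		} else { None };

		// Phase 2: fresh mutable lookup to store the result.
		if let Some(new_feerate) = new_feerate {
			if let Some(feerate) = requests.get_mut("first_claim_txid") {
				*feerate = new_feerate;
			}
		}
		assert_eq!(requests["first_claim_txid"], 500);
	}
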
@@ -2519,8 +2562,6 @@ impl ChannelMonitor {
 							OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
 								if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
 									if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
-										// Avoid bump engine using inaccurate feerate due to new transaction size
-										claim_material.feerate_previous = 0;
 										claim_material.per_input_material.insert(outpoint, input_material);
 										// Using a HashMap guarantee us than if we have multiple outpoints getting
 										// resurrected only one bump claim tx is going to be broadcast
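
Both Occupied arms rewritten by this patch queue an event for height + ANTI_REORG_DELAY - 1 only if an equal event is not already present, so replaying the same block or detecting the same conflict twice cannot enqueue duplicate Claim or ContentiousOutpoint entries. A standalone sketch of that dedup-on-insert pattern over the waiting-threshold map, with strings standing in for OnchainEvent values:

	use std::collections::{hash_map, HashMap};

	fn queue_event(events: &mut HashMap<u32, Vec<String>>, height: u32, new_event: String) {
		match events.entry(height) {
			hash_map::Entry::Occupied(mut entry) => {
				// Push only if an equal event is not already queued for this height.
				if !entry.get().contains(&new_event) {
					entry.get_mut().push(new_event);
				}
			},
			hash_map::Entry::Vacant(entry) => {
				entry.insert(vec![new_event]);
			}
		}
	}

	fn main() {
		let mut events = HashMap::new();
		queue_event(&mut events, 105, "claim:deadbeef".to_string());
		queue_event(&mut events, 105, "claim:deadbeef".to_string()); // deduped
		assert_eq!(events[&105].len(), 1);
	}
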