use util::ser::{ReadableArgs, Readable, Writer, Writeable, WriterWriteAdaptor, U48};
use util::{byte_utils, events};
-use std::collections::{HashMap, hash_map};
+use std::collections::{HashMap, hash_map, HashSet};
use std::sync::{Arc,Mutex};
use std::{hash,cmp, mem};
let block_hash = header.bitcoin_hash();
let mut monitors = self.monitors.lock().unwrap();
for monitor in monitors.values_mut() {
- monitor.block_disconnected(disconnected_height, &block_hash);
+ monitor.block_disconnected(disconnected_height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
}
}
}
key: SecretKey,
preimage: Option<PaymentPreimage>,
amount: u64,
+ locktime: u32,
},
LocalHTLC {
script: Script,
}
writer.write_all(&byte_utils::be64_to_array(*amount))?;
},
- &InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount } => {
+ &InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount, ref locktime } => {
writer.write_all(&[1; 1])?;
script.write(writer)?;
key.write(writer)?;
preimage.write(writer)?;
writer.write_all(&byte_utils::be64_to_array(*amount))?;
+ writer.write_all(&byte_utils::be32_to_array(*locktime))?;
},
&InputMaterial::LocalHTLC { ref script, ref sigs, ref preimage, ref amount } => {
writer.write_all(&[2; 1])?;
let key = Readable::read(reader)?;
let preimage = Readable::read(reader)?;
let amount = Readable::read(reader)?;
+ let locktime = Readable::read(reader)?;
InputMaterial::RemoteHTLC {
script,
key,
preimage,
- amount
+ amount,
+ locktime
}
},
2 => {
HTLCUpdate {
htlc_update: (HTLCSource, PaymentHash),
},
+ /// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by a remote party tx.
+ /// In this case, we need to drop the outpoint and regenerate a new claim tx. For safety, we keep tracking
+ /// the outpoint so we can be sure to resurrect it back into the claim tx if a reorg happens.
+ ContentiousOutpoint {
+ outpoint: BitcoinOutPoint,
+ input_material: InputMaterial,
+ }
}
/// Higher-level cache structure needed to re-generate bumped claim txn if needed
#[derive(Clone, PartialEq)]
-struct ClaimTxBumpMaterial {
+pub struct ClaimTxBumpMaterial {
// At every block tick, used to check if pending claiming tx is taking too
// much time for confirmation and we need to bump it.
height_timer: u32,
key_storage: Storage,
their_htlc_base_key: Option<PublicKey>,
their_delayed_payment_base_key: Option<PublicKey>,
+ funding_redeemscript: Option<Script>,
+ channel_value_satoshis: Option<u64>,
// first is the idx of the first of the two revocation points
their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
// Key is identifier of the pending claim request, i.e the txid of the initial claiming transaction generated by
// us and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
// Entry is cache of elements need to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
+ #[cfg(test)] // Used in functional_test to verify sanitization
+ pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
+ #[cfg(not(test))]
pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
// Used to link outpoints claimed in a connected block to a pending claim request.
// is txid of the initial claiming transaction and is immutable until outpoint is
// post-anti-reorg-delay solved, confirmation_block is used to erase entry if
// block with output gets disconnected.
+ #[cfg(test)] // Used in functional_test to verify sanitization
+ pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
+ #[cfg(not(test))]
claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
// Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which
self.key_storage != other.key_storage ||
self.their_htlc_base_key != other.their_htlc_base_key ||
self.their_delayed_payment_base_key != other.their_delayed_payment_base_key ||
+ self.funding_redeemscript != other.funding_redeemscript ||
+ self.channel_value_satoshis != other.channel_value_satoshis ||
self.their_cur_revocation_points != other.their_cur_revocation_points ||
self.our_to_self_delay != other.our_to_self_delay ||
self.their_to_self_delay != other.their_to_self_delay ||
},
their_htlc_base_key: None,
their_delayed_payment_base_key: None,
+ funding_redeemscript: None,
+ channel_value_satoshis: None,
their_cur_revocation_points: None,
our_to_self_delay: our_to_self_delay,
Ok(())
}
- /// Panics if commitment_transaction_number_obscure_factor doesn't fit in 48 bits
- pub(super) fn set_commitment_obscure_factor(&mut self, commitment_transaction_number_obscure_factor: u64) {
- assert!(commitment_transaction_number_obscure_factor < (1 << 48));
- self.commitment_transaction_number_obscure_factor = commitment_transaction_number_obscure_factor;
- }
-
/// Allows this monitor to scan only for transactions which are applicable. Note that this is
/// optional, without it this monitor cannot be used in an SPV client, but you may wish to
/// avoid this (or call unset_funding_info) on a monitor you wish to send to a watchtower as it
}
/// We log these base keys at channel opening to be able to rebuild the redeemscript in case of a leaked revoked commit tx
- pub(super) fn set_their_base_keys(&mut self, their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey) {
+ /// Panics if commitment_transaction_number_obscure_factor doesn't fit in 48 bits
+ pub(super) fn set_basic_channel_info(&mut self, their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey, their_to_self_delay: u16, funding_redeemscript: Script, channel_value_satoshis: u64, commitment_transaction_number_obscure_factor: u64) {
self.their_htlc_base_key = Some(their_htlc_base_key.clone());
self.their_delayed_payment_base_key = Some(their_delayed_payment_base_key.clone());
- }
-
- pub(super) fn set_their_to_self_delay(&mut self, their_to_self_delay: u16) {
self.their_to_self_delay = Some(their_to_self_delay);
+ self.funding_redeemscript = Some(funding_redeemscript);
+ self.channel_value_satoshis = Some(channel_value_satoshis);
+ assert!(commitment_transaction_number_obscure_factor < (1 << 48));
+ self.commitment_transaction_number_obscure_factor = commitment_transaction_number_obscure_factor;
}
pub(super) fn unset_funding_info(&mut self) {
writer.write_all(&self.their_htlc_base_key.as_ref().unwrap().serialize())?;
writer.write_all(&self.their_delayed_payment_base_key.as_ref().unwrap().serialize())?;
+ self.funding_redeemscript.as_ref().unwrap().write(writer)?;
+ self.channel_value_satoshis.unwrap().write(writer)?;
match self.their_cur_revocation_points {
Some((idx, pubkey, second_option)) => {
writer.write_all(&[1; 1])?;
htlc_update.0.write(writer)?;
htlc_update.1.write(writer)?;
+ },
+ OnchainEvent::ContentiousOutpoint { ref outpoint, ref input_material } => {
+ writer.write_all(&[2; 1])?;
+ outpoint.write(writer)?;
+ input_material.write(writer)?;
}
}
}
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", single_htlc_tx.input[0].previous_output.txid, single_htlc_tx.input[0].previous_output.vout, height_timer);
let mut per_input_material = HashMap::with_capacity(1);
per_input_material.insert(single_htlc_tx.input[0].previous_output, InputMaterial::Revoked { script: redeemscript, pubkey: Some(revocation_pubkey), key: revocation_key, is_htlc: true, amount: htlc.amount_msat / 1000 });
+ match self.claimable_outpoints.entry(single_htlc_tx.input[0].previous_output) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((single_htlc_tx.txid(), height)); }
+ }
match self.pending_claim_requests.entry(single_htlc_tx.txid()) {
hash_map::Entry::Occupied(_) => {},
hash_map::Entry::Vacant(entry) => { entry.insert(ClaimTxBumpMaterial { height_timer, feerate_previous: used_feerate, soonest_timelock: htlc.cltv_expiry, per_input_material }); }
}
}
let height_timer = Self::get_height_timer(height, soonest_timelock);
+ let spend_txid = spend_tx.txid();
for (input, info) in spend_tx.input.iter_mut().zip(inputs_info.iter()) {
let (redeemscript, revocation_key) = sign_input!(sighash_parts, input, info.0, info.1);
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", input.previous_output.txid, input.previous_output.vout, height_timer);
per_input_material.insert(input.previous_output, InputMaterial::Revoked { script: redeemscript, pubkey: if info.0.is_some() { Some(revocation_pubkey) } else { None }, key: revocation_key, is_htlc: if info.0.is_some() { true } else { false }, amount: info.1 });
- if info.2 < soonest_timelock {
- soonest_timelock = info.2;
+ match self.claimable_outpoints.entry(input.previous_output) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((spend_txid, height)); }
}
}
- match self.pending_claim_requests.entry(spend_tx.txid()) {
+ match self.pending_claim_requests.entry(spend_txid) {
hash_map::Entry::Occupied(_) => {},
hash_map::Entry::Vacant(entry) => { entry.insert(ClaimTxBumpMaterial { height_timer, feerate_previous: used_feerate, soonest_timelock, per_input_material }); }
}
});
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", single_htlc_tx.input[0].previous_output.txid, single_htlc_tx.input[0].previous_output.vout, height_timer);
let mut per_input_material = HashMap::with_capacity(1);
- per_input_material.insert(single_htlc_tx.input[0].previous_output, InputMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000 });
+ per_input_material.insert(single_htlc_tx.input[0].previous_output, InputMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000, locktime: 0 });
+ match self.claimable_outpoints.entry(single_htlc_tx.input[0].previous_output) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((single_htlc_tx.txid(), height)); }
+ }
match self.pending_claim_requests.entry(single_htlc_tx.txid()) {
hash_map::Entry::Occupied(_) => {},
hash_map::Entry::Vacant(entry) => { entry.insert(ClaimTxBumpMaterial { height_timer, feerate_previous: used_feerate, soonest_timelock: htlc.cltv_expiry, per_input_material}); }
//TODO: track SpendableOutputDescriptor
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", timeout_tx.input[0].previous_output.txid, timeout_tx.input[0].previous_output.vout, height_timer);
let mut per_input_material = HashMap::with_capacity(1);
- per_input_material.insert(timeout_tx.input[0].previous_output, InputMaterial::RemoteHTLC { script : redeemscript, key: htlc_key, preimage: None, amount: htlc.amount_msat / 1000 });
+ per_input_material.insert(timeout_tx.input[0].previous_output, InputMaterial::RemoteHTLC { script : redeemscript, key: htlc_key, preimage: None, amount: htlc.amount_msat / 1000, locktime: htlc.cltv_expiry });
+ match self.claimable_outpoints.entry(timeout_tx.input[0].previous_output) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((timeout_tx.txid(), height)); }
+ }
match self.pending_claim_requests.entry(timeout_tx.txid()) {
hash_map::Entry::Occupied(_) => {},
hash_map::Entry::Vacant(entry) => { entry.insert(ClaimTxBumpMaterial { height_timer, feerate_previous: used_feerate, soonest_timelock: htlc.cltv_expiry, per_input_material }); }
}
}
let height_timer = Self::get_height_timer(height, soonest_timelock);
+ let spend_txid = spend_tx.txid();
for (input, info) in spend_tx.input.iter_mut().zip(inputs_info.iter()) {
let (redeemscript, htlc_key) = sign_input!(sighash_parts, input, info.1, (info.0).0.to_vec());
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", input.previous_output.txid, input.previous_output.vout, height_timer);
- per_input_material.insert(input.previous_output, InputMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*(info.0)), amount: info.1});
+ per_input_material.insert(input.previous_output, InputMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*(info.0)), amount: info.1, locktime: 0});
+ match self.claimable_outpoints.entry(input.previous_output) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((spend_txid, height)); }
+ }
}
- match self.pending_claim_requests.entry(spend_tx.txid()) {
+ match self.pending_claim_requests.entry(spend_txid) {
hash_map::Entry::Occupied(_) => {},
hash_map::Entry::Vacant(entry) => { entry.insert(ClaimTxBumpMaterial { height_timer, feerate_previous: used_feerate, soonest_timelock, per_input_material }); }
}
/// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
fn check_spend_remote_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32, fee_estimator: &FeeEstimator) -> (Option<Transaction>, Option<SpendableOutputDescriptor>) {
+ //TODO: send back new outputs to guarantee pending_claim_request consistency
if tx.input.len() != 1 || tx.output.len() != 1 {
return (None, None)
}
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", spend_tx.input[0].previous_output.txid, spend_tx.input[0].previous_output.vout, height_timer);
let mut per_input_material = HashMap::with_capacity(1);
per_input_material.insert(spend_tx.input[0].previous_output, InputMaterial::Revoked { script: redeemscript, pubkey: None, key: revocation_key, is_htlc: false, amount: tx.output[0].value });
+ match self.claimable_outpoints.entry(spend_tx.input[0].previous_output) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((spend_tx.txid(), height)); }
+ }
match self.pending_claim_requests.entry(spend_tx.txid()) {
hash_map::Entry::Occupied(_) => {},
hash_map::Entry::Vacant(entry) => { entry.insert(ClaimTxBumpMaterial { height_timer, feerate_previous: used_feerate, soonest_timelock: height + self.our_to_self_delay as u32, per_input_material }); }
let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
let mut per_input_material = HashMap::with_capacity(1);
per_input_material.insert(htlc_timeout_tx.input[0].previous_output, InputMaterial::LocalHTLC { script: htlc_script, sigs: (*their_sig, *our_sig), preimage: None, amount: htlc.amount_msat / 1000});
+ //TODO: with option_simplified_commitment track outpoint too
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", htlc_timeout_tx.input[0].previous_output.vout, htlc_timeout_tx.input[0].previous_output.txid, height_timer);
pending_claims.push((htlc_timeout_tx.txid(), ClaimTxBumpMaterial { height_timer, feerate_previous: 0, soonest_timelock: htlc.cltv_expiry, per_input_material }));
res.push(htlc_timeout_tx);
let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
let mut per_input_material = HashMap::with_capacity(1);
per_input_material.insert(htlc_success_tx.input[0].previous_output, InputMaterial::LocalHTLC { script: htlc_script, sigs: (*their_sig, *our_sig), preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000});
+ //TODO: with option_simplified_commitment track outpoint too
log_trace!(self, "Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", htlc_success_tx.input[0].previous_output.vout, htlc_success_tx.input[0].previous_output.txid, height_timer);
pending_claims.push((htlc_success_tx.txid(), ClaimTxBumpMaterial { height_timer, feerate_previous: 0, soonest_timelock: htlc.cltv_expiry, per_input_material }));
res.push(htlc_success_tx);
/// out-of-band the other node operator to coordinate with him if option is available to you.
/// In any-case, choice is up to the user.
pub fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
+ log_trace!(self, "Getting signed latest local commitment transaction!");
if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
let mut res = vec![local_tx.tx.clone()];
match self.key_storage {
}
fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>, Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>) {
+ log_trace!(self, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
let mut watch_outputs = Vec::new();
let mut spendable_outputs = Vec::new();
let mut htlc_updated = Vec::new();
- let mut bump_candidates = Vec::new();
+ let mut bump_candidates = HashSet::new();
for tx in txn_matched {
if tx.input.len() == 1 {
// Assuming our keys were not leaked (in which case we're screwed no matter what),
}
// Scan all inputs to verify whether one of the spent outpoints is of interest to us
+ let mut claimed_outputs_material = Vec::new();
for inp in &tx.input {
- if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&inp.previous_output) {
+ if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
// If outpoint has claim request pending on it...
- if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+ if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
//... we need to verify equality between transaction outpoints and claim request
// outpoints to know if transaction is the original claim or a bumped one issued
// by us.
- let mut claimed_outpoints = Vec::new();
- for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
- if *claim_inp != tx_inp.previous_output {
- claimed_outpoints.push(tx_inp.previous_output.clone());
+ let mut set_equality = true;
+ if claim_material.per_input_material.len() != tx.input.len() {
+ set_equality = false;
+ } else {
+ for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
+ if *claim_inp != tx_inp.previous_output {
+ set_equality = false;
+ }
}
}
- if claimed_outpoints.len() == 0 && claim_material.per_input_material.len() == tx.input.len() { // If true, register claim request to be removed after reaching a block security height
- match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
- hash_map::Entry::Occupied(_) => {},
- hash_map::Entry::Vacant(entry) => {
- entry.insert(vec![OnchainEvent::Claim { claim_request: ancestor_claimable_txid.0.clone()}]);
+
+ macro_rules! clean_claim_request_after_safety_delay {
+ () => {
+ let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ if !entry.get().contains(&new_event) {
+ entry.get_mut().push(new_event);
+ }
+ },
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![new_event]);
+ }
}
}
+ }
+
+ // If this is our transaction (or our counterparty spent all the outputs
+ // before we could anyway, with the same input order as ours), wait for
+ // ANTI_REORG_DELAY and clean the RBF tracking map.
+ if set_equality {
+ clean_claim_request_after_safety_delay!();
} else { // If false, generate new claim request with update outpoint set
- for already_claimed in claimed_outpoints {
- claim_material.per_input_material.remove(&already_claimed);
+ for input in tx.input.iter() {
+ if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
+ claimed_outputs_material.push((input.previous_output, input_material));
+ }
+ // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
+ if claim_material.per_input_material.is_empty() {
+ clean_claim_request_after_safety_delay!();
+ }
}
- // Avoid bump engine using inaccurate feerate due to new transaction size
- claim_material.feerate_previous = 0;
//TODO: recompute soonest_timelock to avoid wasting a bit on fees
- bump_candidates.push((ancestor_claimable_txid.0.clone(), claim_material.clone()));
+ bump_candidates.insert(first_claim_txid_height.0.clone());
}
+ break; //No need to iterate further, the tx is either ours or theirs
} else {
panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
}
}
}
+ for (outpoint, input_material) in claimed_outputs_material.drain(..) {
+ let new_event = OnchainEvent::ContentiousOutpoint { outpoint, input_material };
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ if !entry.get().contains(&new_event) {
+ entry.get_mut().push(new_event);
+ }
+ },
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![new_event]);
+ }
+ }
+ }
}
if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
if self.would_broadcast_at_height(height) {
for ev in events {
match ev {
OnchainEvent::Claim { claim_request } => {
- // We may remove a whole set of claim outpoints here, as these one may have been aggregated in a single tx and claimed so atomically
- self.pending_claim_requests.remove(&claim_request);
+ // We may remove a whole set of claim outpoints here, as these one may have
+ // been aggregated in a single tx and claimed so atomically
+ if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
+ for outpoint in bump_material.per_input_material.keys() {
+ self.claimable_outpoints.remove(&outpoint);
+ }
+ }
},
OnchainEvent::HTLCUpdate { htlc_update } => {
log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
htlc_updated.push((htlc_update.0, None, htlc_update.1));
},
+ OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
+ self.claimable_outpoints.remove(&outpoint);
+ }
}
}
}
- for (ancestor_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
+ for (first_claim_txid, ref mut cached_claim_datas) in self.pending_claim_requests.iter_mut() {
if cached_claim_datas.height_timer == height {
- bump_candidates.push((ancestor_claim_txid.clone(), cached_claim_datas.clone()));
+ bump_candidates.insert(first_claim_txid.clone());
}
}
- for &mut (_, ref mut cached_claim_datas) in bump_candidates.iter_mut() {
- if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &cached_claim_datas, fee_estimator) {
- cached_claim_datas.height_timer = new_timer;
- cached_claim_datas.feerate_previous = new_feerate;
- broadcaster.broadcast_transaction(&bump_tx);
+ for first_claim_txid in bump_candidates.iter() {
+ if let Some((new_timer, new_feerate)) = {
+ if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
+ if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, fee_estimator) {
+ broadcaster.broadcast_transaction(&bump_tx);
+ Some((new_timer, new_feerate))
+ } else { None }
+ } else { unreachable!(); }
+ } {
+ if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
+ claim_material.height_timer = new_timer;
+ claim_material.feerate_previous = new_feerate;
+ } else { unreachable!(); }
}
}
- for (ancestor_claim_txid, cached_claim_datas) in bump_candidates.drain(..) {
- self.pending_claim_requests.insert(ancestor_claim_txid, cached_claim_datas);
- }
self.last_block_hash = block_hash.clone();
(watch_outputs, spendable_outputs, htlc_updated)
}
- fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash) {
- if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
+ fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator) {
+ let mut bump_candidates = HashMap::new();
+ if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
//We may discard:
//- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
//- our claim tx on a commitment tx output
+ //- resurrect outpoint back into its claimable set and regenerate tx
+ for ev in events {
+ match ev {
+ OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
+ if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
+ if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+ claim_material.per_input_material.insert(outpoint, input_material);
+ // Using a HashMap guarantees that if we have multiple outpoints getting
+ // resurrected, only one bump claim tx is going to be broadcast
+ bump_candidates.insert(ancestor_claimable_txid.clone(), claim_material.clone());
+ }
+ }
+ },
+ _ => {},
+ }
+ }
+ }
+ for (_, claim_material) in bump_candidates.iter_mut() {
+ if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, fee_estimator) {
+ claim_material.height_timer = new_timer;
+ claim_material.feerate_previous = new_feerate;
+ broadcaster.broadcast_transaction(&bump_tx);
+ }
+ }
+ for (ancestor_claim_txid, claim_material) in bump_candidates.drain() {
+ self.pending_claim_requests.insert(ancestor_claim_txid.0, claim_material);
+ }
+ //TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
+ // right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
+ let mut remove_request = Vec::new();
+ self.claimable_outpoints.retain(|_, ref v|
+ if v.1 == height {
+ remove_request.push(v.0.clone());
+ false
+ } else { true });
+ for req in remove_request {
+ self.pending_claim_requests.remove(&req);
}
- self.claimable_outpoints.retain(|_, ref v| if v.1 == height { false } else { true });
self.last_block_hash = block_hash.clone();
}
inputs_witnesses_weight += Self::get_witnesses_weight(if !is_htlc { &[InputDescriptors::RevokedOutput] } else if script.len() == OFFERED_HTLC_SCRIPT_WEIGHT { &[InputDescriptors::RevokedOfferedHTLC] } else if script.len() == ACCEPTED_HTLC_SCRIPT_WEIGHT { &[InputDescriptors::RevokedReceivedHTLC] } else { &[] });
amt += *amount;
},
- &InputMaterial::RemoteHTLC { .. } => { },
+ &InputMaterial::RemoteHTLC { ref preimage, ref amount, .. } => {
+ inputs_witnesses_weight += Self::get_witnesses_weight(if preimage.is_some() { &[InputDescriptors::OfferedHTLC] } else { &[InputDescriptors::ReceivedHTLC] });
+ amt += *amount;
+ },
&InputMaterial::LocalHTLC { .. } => { return None; }
}
}
bumped_tx.input[i].witness.push(script.clone().into_bytes());
log_trace!(self, "Going to broadcast bumped Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}", bumped_tx.txid(), if !is_htlc { "to_local" } else if script.len() == OFFERED_HTLC_SCRIPT_WEIGHT { "offered" } else if script.len() == ACCEPTED_HTLC_SCRIPT_WEIGHT { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
},
- &InputMaterial::RemoteHTLC { .. } => {},
+ &InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount, ref locktime } => {
+ if !preimage.is_some() { bumped_tx.lock_time = *locktime };
+ let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
+ let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &script, *amount)[..]);
+ let sig = self.secp_ctx.sign(&sighash, &key);
+ bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+ bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+ if let &Some(preimage) = preimage {
+ bumped_tx.input[i].witness.push(preimage.clone().0.to_vec());
+ } else {
+ bumped_tx.input[i].witness.push(vec![0]);
+ }
+ bumped_tx.input[i].witness.push(script.clone().into_bytes());
+ log_trace!(self, "Going to broadcast bumped Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {}", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
+ },
&InputMaterial::LocalHTLC { .. } => {
//TODO : Given that Local Commitment Transaction and HTLC-Timeout/HTLC-Success are counter-signed by peer, we can't
// RBF them. Need a Lightning specs change and package relay modification :
let their_htlc_base_key = Some(Readable::read(reader)?);
let their_delayed_payment_base_key = Some(Readable::read(reader)?);
+ let funding_redeemscript = Some(Readable::read(reader)?);
+ let channel_value_satoshis = Some(Readable::read(reader)?);
let their_cur_revocation_points = {
let first_idx = <U48 as Readable<R>>::read(reader)?.0;
htlc_update: (htlc_source, hash)
}
},
+ 2 => {
+ let outpoint = Readable::read(reader)?;
+ let input_material = Readable::read(reader)?;
+ OnchainEvent::ContentiousOutpoint {
+ outpoint,
+ input_material
+ }
+ }
_ => return Err(DecodeError::InvalidValue),
};
events.push(ev);
key_storage,
their_htlc_base_key,
their_delayed_payment_base_key,
+ funding_redeemscript,
+ channel_value_satoshis,
their_cur_revocation_points,
our_to_self_delay,
// Prune with one old state and a local commitment tx holding a few overlaps with the
// old state.
let mut monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- monitor.set_their_to_self_delay(10);
+ monitor.their_to_self_delay = Some(10);
monitor.provide_latest_local_commitment_tx_info(dummy_tx.clone(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..10]));
monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key);