use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::{TxOut,Transaction};
-use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use ln::chan_utils;
use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCType, ChannelTransactionParameters, HolderCommitmentTransaction};
use ln::channelmanager::{BestBlock, HTLCSource};
-use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
use chain;
use chain::WatchedOutput;
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, Sign, KeysInterface};
+use chain::onchaintx::OnchainTxHandler;
+use chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use chain::Filter;
use util::logger::Logger;
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, U48};
}
}
-/// When ChannelMonitor discovers an onchain outpoint being a step of a channel and that it needs
-/// to generate a tx to push channel state forward, we cache outpoint-solving tx material to build
-/// a new bumped one in case of lenghty confirmation delay
-#[derive(Clone, PartialEq)]
-pub(crate) enum InputMaterial {
- Revoked {
- per_commitment_point: PublicKey,
- counterparty_delayed_payment_base_key: PublicKey,
- counterparty_htlc_base_key: PublicKey,
- per_commitment_key: SecretKey,
- input_descriptor: InputDescriptors,
- amount: u64,
- htlc: Option<HTLCOutputInCommitment>,
- on_counterparty_tx_csv: u16,
- },
- CounterpartyHTLC {
- per_commitment_point: PublicKey,
- counterparty_delayed_payment_base_key: PublicKey,
- counterparty_htlc_base_key: PublicKey,
- preimage: Option<PaymentPreimage>,
- htlc: HTLCOutputInCommitment
- },
- HolderHTLC {
- preimage: Option<PaymentPreimage>,
- amount: u64,
- },
- Funding {
- funding_redeemscript: Script,
- }
-}
-
-impl Writeable for InputMaterial {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match self {
- &InputMaterial::Revoked { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_counterparty_tx_csv} => {
- writer.write_all(&[0; 1])?;
- per_commitment_point.write(writer)?;
- counterparty_delayed_payment_base_key.write(writer)?;
- counterparty_htlc_base_key.write(writer)?;
- writer.write_all(&per_commitment_key[..])?;
- input_descriptor.write(writer)?;
- writer.write_all(&byte_utils::be64_to_array(*amount))?;
- htlc.write(writer)?;
- on_counterparty_tx_csv.write(writer)?;
- },
- &InputMaterial::CounterpartyHTLC { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref preimage, ref htlc} => {
- writer.write_all(&[1; 1])?;
- per_commitment_point.write(writer)?;
- counterparty_delayed_payment_base_key.write(writer)?;
- counterparty_htlc_base_key.write(writer)?;
- preimage.write(writer)?;
- htlc.write(writer)?;
- },
- &InputMaterial::HolderHTLC { ref preimage, ref amount } => {
- writer.write_all(&[2; 1])?;
- preimage.write(writer)?;
- writer.write_all(&byte_utils::be64_to_array(*amount))?;
- },
- &InputMaterial::Funding { ref funding_redeemscript } => {
- writer.write_all(&[3; 1])?;
- funding_redeemscript.write(writer)?;
- }
- }
- Ok(())
- }
-}
-
-impl Readable for InputMaterial {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let input_material = match <u8 as Readable>::read(reader)? {
- 0 => {
- let per_commitment_point = Readable::read(reader)?;
- let counterparty_delayed_payment_base_key = Readable::read(reader)?;
- let counterparty_htlc_base_key = Readable::read(reader)?;
- let per_commitment_key = Readable::read(reader)?;
- let input_descriptor = Readable::read(reader)?;
- let amount = Readable::read(reader)?;
- let htlc = Readable::read(reader)?;
- let on_counterparty_tx_csv = Readable::read(reader)?;
- InputMaterial::Revoked {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- per_commitment_key,
- input_descriptor,
- amount,
- htlc,
- on_counterparty_tx_csv
- }
- },
- 1 => {
- let per_commitment_point = Readable::read(reader)?;
- let counterparty_delayed_payment_base_key = Readable::read(reader)?;
- let counterparty_htlc_base_key = Readable::read(reader)?;
- let preimage = Readable::read(reader)?;
- let htlc = Readable::read(reader)?;
- InputMaterial::CounterpartyHTLC {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- preimage,
- htlc
- }
- },
- 2 => {
- let preimage = Readable::read(reader)?;
- let amount = Readable::read(reader)?;
- InputMaterial::HolderHTLC {
- preimage,
- amount,
- }
- },
- 3 => {
- InputMaterial::Funding {
- funding_redeemscript: Readable::read(reader)?,
- }
- }
- _ => return Err(DecodeError::InvalidValue),
- };
- Ok(input_material)
- }
-}
-
-/// ClaimRequest is a descriptor structure to communicate between detection
-/// and reaction module. They are generated by ChannelMonitor while parsing
-/// onchain txn leaked from a channel and handed over to OnchainTxHandler which
-/// is responsible for opportunistic aggregation, selecting and enforcing
-/// bumping logic, building and signing transactions.
-pub(crate) struct ClaimRequest {
- // Block height before which claiming is exclusive to one party,
- // after reaching it, claiming may be contentious.
- pub(crate) absolute_timelock: u32,
- // Timeout tx must have nLocktime set which means aggregating multiple
- // ones must take the higher nLocktime among them to satisfy all of them.
- // Sadly it has few pitfalls, a) it takes longuer to get fund back b) CLTV_DELTA
- // of a sooner-HTLC could be swallowed by the highest nLocktime of the HTLC set.
- // Do simplify we mark them as non-aggregable.
- pub(crate) aggregable: bool,
- // Basic bitcoin outpoint (txid, vout)
- pub(crate) outpoint: BitcoinOutPoint,
- // Following outpoint type, set of data needed to generate transaction digest
- // and satisfy witness program.
- pub(crate) witness_data: InputMaterial
-}
-
/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
/// transaction causing it.
///
// *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our
// holder commitment transactions.
if self.broadcasted_holder_revokable_script.is_some() {
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, 0);
self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger);
if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx);
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, 0);
self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger);
}
}
/// HTLC-Success/HTLC-Timeout transactions.
/// Return updates for HTLC pending in the channel and failed automatically by the broadcast of
/// revoked counterparty commitment tx
- fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, TransactionOutputs) where L::Target: Logger {
+ fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<PackageTemplate>, TransactionOutputs) where L::Target: Logger {
// Most secp and related errors trying to create keys means we have no hope of constructing
// a spend transaction...so we return no transactions to broadcast
let mut claimable_outpoints = Vec::new();
// First, process non-htlc outputs (to_holder & to_counterparty)
for (idx, outp) in tx.output.iter().enumerate() {
if outp.script_pubkey == revokeable_p2wsh {
- let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: outp.value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv};
- claimable_outpoints.push(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 }, witness_data});
+ let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_tx_cache.on_counterparty_tx_csv);
+ let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, true, height);
+ claimable_outpoints.push(justice_package);
}
}
tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
}
- let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: if htlc.offered { InputDescriptors::RevokedOfferedHTLC } else { InputDescriptors::RevokedReceivedHTLC }, amount: tx.output[transaction_output_index as usize].value, htlc: Some(htlc.clone()), on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv};
- claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+ let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone());
+ let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height);
+ claimable_outpoints.push(justice_package);
}
}
}
(claimable_outpoints, (commitment_txid, watch_outputs))
}
- fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<ClaimRequest> {
- let mut claims = Vec::new();
+ fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<PackageTemplate> {
+ let mut claimable_outpoints = Vec::new();
if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
if let Some(revocation_points) = self.their_cur_revocation_points {
let revocation_point_option =
if let Some(transaction) = tx {
if transaction_output_index as usize >= transaction.output.len() ||
transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
- return claims; // Corrupted per_commitment_data, fuck this user
+ return claimable_outpoints; // Corrupted per_commitment_data, fuck this user
}
}
- let preimage =
- if htlc.offered {
- if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) {
- Some(*p)
- } else { None }
- } else { None };
- let aggregable = if !htlc.offered { false } else { true };
+ let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
if preimage.is_some() || !htlc.offered {
- let witness_data = InputMaterial::CounterpartyHTLC { per_commitment_point: *revocation_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, preimage, htlc: htlc.clone() };
- claims.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+ let counterparty_htlc_outp = if htlc.offered { PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(*revocation_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, preimage.unwrap(), htlc.clone())) } else { PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(*revocation_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, htlc.clone())) };
+ let aggregation = if !htlc.offered { false } else { true };
+ let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
+ claimable_outpoints.push(counterparty_package);
}
}
}
}
}
}
- claims
+ claimable_outpoints
}
/// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
- fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<TransactionOutputs>) where L::Target: Logger {
+ fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<PackageTemplate>, Option<TransactionOutputs>) where L::Target: Logger {
let htlc_txid = tx.txid();
if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
return (Vec::new(), None)
let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
- let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv };
- let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
+ let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, tx.output[0].value, self.counterparty_tx_cache.on_counterparty_tx_csv);
+ let justice_package = PackageTemplate::build_package(htlc_txid, 0, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, true, height);
+ let claimable_outpoints = vec!(justice_package);
let outputs = vec![(0, tx.output[0].clone())];
(claimable_outpoints, Some((htlc_txid, outputs)))
}
- // Returns (1) `ClaimRequest`s that can be given to the OnChainTxHandler, so that the handler can
+ // Returns (1) `PackageTemplate`s that can be given to the OnChainTxHandler, so that the handler can
// broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable
// script so we can detect whether a holder transaction has been seen on-chain.
- fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Option<(Script, PublicKey, PublicKey)>) {
+ fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx, height: u32) -> (Vec<PackageTemplate>, Option<(Script, PublicKey, PublicKey)>) {
let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len());
let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key);
for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
if let Some(transaction_output_index) = htlc.transaction_output_index {
- claim_requests.push(ClaimRequest { absolute_timelock: ::core::u32::MAX, aggregable: false, outpoint: BitcoinOutPoint { txid: holder_tx.txid, vout: transaction_output_index as u32 },
- witness_data: InputMaterial::HolderHTLC {
- preimage: if !htlc.offered {
- if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
- Some(preimage.clone())
- } else {
- // We can't build an HTLC-Success transaction without the preimage
- continue;
- }
- } else { None },
- amount: htlc.amount_msat,
- }});
+ let htlc_output = HolderHTLCOutput::build(if !htlc.offered {
+ if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
+ Some(preimage.clone())
+ } else {
+ // We can't build an HTLC-Success transaction without the preimage
+ continue;
+ }
+ } else { None }, htlc.amount_msat);
+ let htlc_package = PackageTemplate::build_package(holder_tx.txid, transaction_output_index, PackageSolvingData::HolderHTLCOutput(htlc_output), height, false, height);
+ claim_requests.push(htlc_package);
}
}
/// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
/// revoked using data in holder_claimable_outpoints.
/// Should not be used if check_spend_revoked_transaction succeeds.
- fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, TransactionOutputs) where L::Target: Logger {
+ fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<PackageTemplate>, TransactionOutputs) where L::Target: Logger {
let commitment_txid = tx.txid();
let mut claim_requests = Vec::new();
let mut watch_outputs = Vec::new();
if self.current_holder_commitment_tx.txid == commitment_txid {
is_holder_tx = true;
log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
- let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+ let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
is_holder_tx = true;
log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
- let res = self.get_broadcasted_holder_claims(holder_tx);
+ let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
}
height: u32,
txn_matched: Vec<&Transaction>,
mut watch_outputs: Vec<TransactionOutputs>,
- mut claimable_outpoints: Vec<ClaimRequest>,
+ mut claimable_outpoints: Vec<PackageTemplate>,
broadcaster: B,
fee_estimator: F,
logger: L,
{
let should_broadcast = self.would_broadcast_at_height(height, &logger);
if should_broadcast {
- claimable_outpoints.push(ClaimRequest { absolute_timelock: height, aggregable: false, outpoint: BitcoinOutPoint { txid: self.funding_info.0.txid.clone(), vout: self.funding_info.0.index as u32 }, witness_data: InputMaterial::Funding { funding_redeemscript: self.funding_redeemscript.clone() }});
+ let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone());
+ let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), height, false, height);
+ claimable_outpoints.push(commitment_package);
self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
self.holder_tx_signed = true;
- let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+ let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
use bitcoin::network::constants::Network;
use hex;
use chain::channelmonitor::ChannelMonitor;
+ use chain::package::{WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC, WEIGHT_REVOKED_OUTPUT};
use chain::transaction::OutPoint;
use ln::{PaymentPreimage, PaymentHash};
use ln::channelmanager::BestBlock;
- use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
let mut sum_actual_sigs = 0;
macro_rules! sign_input {
- ($sighash_parts: expr, $idx: expr, $amount: expr, $input_type: expr, $sum_actual_sigs: expr) => {
+ ($sighash_parts: expr, $idx: expr, $amount: expr, $weight: expr, $sum_actual_sigs: expr) => {
let htlc = HTLCOutputInCommitment {
- offered: if *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::OfferedHTLC { true } else { false },
+ offered: if *$weight == WEIGHT_REVOKED_OFFERED_HTLC || *$weight == WEIGHT_OFFERED_HTLC { true } else { false },
amount_msat: 0,
cltv_expiry: 2 << 16,
payment_hash: PaymentHash([1; 32]),
transaction_output_index: Some($idx as u32),
};
- let redeem_script = if *$input_type == InputDescriptors::RevokedOutput { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) };
+ let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) };
let sighash = hash_to_message!(&$sighash_parts.signature_hash($idx, &redeem_script, $amount, SigHashType::All)[..]);
let sig = secp_ctx.sign(&sighash, &privkey);
$sighash_parts.access_witness($idx).push(sig.serialize_der().to_vec());
$sighash_parts.access_witness($idx)[0].push(SigHashType::All as u8);
sum_actual_sigs += $sighash_parts.access_witness($idx)[0].len();
- if *$input_type == InputDescriptors::RevokedOutput {
+ if *$weight == WEIGHT_REVOKED_OUTPUT {
$sighash_parts.access_witness($idx).push(vec!(1));
- } else if *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::RevokedReceivedHTLC {
+ } else if *$weight == WEIGHT_REVOKED_OFFERED_HTLC || *$weight == WEIGHT_REVOKED_RECEIVED_HTLC {
$sighash_parts.access_witness($idx).push(pubkey.clone().serialize().to_vec());
- } else if *$input_type == InputDescriptors::ReceivedHTLC {
+ } else if *$weight == WEIGHT_RECEIVED_HTLC {
$sighash_parts.access_witness($idx).push(vec![0]);
} else {
$sighash_parts.access_witness($idx).push(PaymentPreimage([1; 32]).0.to_vec());
value: 0,
});
let base_weight = claim_tx.get_weight();
- let inputs_des = vec![InputDescriptors::RevokedOutput, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedReceivedHTLC];
+ let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC];
+ let mut inputs_total_weight = 2; // count segwit flags
{
let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
- for (idx, inp) in inputs_des.iter().enumerate() {
+ for (idx, inp) in inputs_weight.iter().enumerate() {
sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+ inputs_total_weight += inp;
}
}
- assert_eq!(base_weight + OnchainTxHandler::<InMemorySigner>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+ assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
// Claim tx with 1 offered HTLCs, 3 received HTLCs
claim_tx.input.clear();
});
}
let base_weight = claim_tx.get_weight();
- let inputs_des = vec![InputDescriptors::OfferedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC];
+ let inputs_weight = vec![WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_RECEIVED_HTLC];
+ let mut inputs_total_weight = 2; // count segwit flags
{
let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
- for (idx, inp) in inputs_des.iter().enumerate() {
+ for (idx, inp) in inputs_weight.iter().enumerate() {
sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+ inputs_total_weight += inp;
}
}
- assert_eq!(base_weight + OnchainTxHandler::<InMemorySigner>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+ assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
// Justice tx with 1 revoked HTLC-Success tx output
claim_tx.input.clear();
witness: Vec::new(),
});
let base_weight = claim_tx.get_weight();
- let inputs_des = vec![InputDescriptors::RevokedOutput];
+ let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT];
+ let mut inputs_total_weight = 2; // count segwit flags
{
let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
- for (idx, inp) in inputs_des.iter().enumerate() {
+ for (idx, inp) in inputs_weight.iter().enumerate() {
sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+ inputs_total_weight += inp;
}
}
- assert_eq!(base_weight + OnchainTxHandler::<InMemorySigner>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_isg */ (73 * inputs_des.len() - sum_actual_sigs));
+ assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_isg */ (73 * inputs_weight.len() - sum_actual_sigs));
}
// Further testing is done in the ChannelManager integration tests.
pub mod channelmonitor;
pub mod transaction;
pub mod keysinterface;
+pub(crate) mod onchaintx;
+pub(crate) mod package;
/// An error when accessing the chain via [`Access`].
#[derive(Clone)]
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! The logic to build claims and bump in-flight transactions until confirmations.
+//!
+//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all
+//! building, tracking, bumping and notification functions.
+
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::script::Script;
+
+use bitcoin::hash_types::Txid;
+
+use bitcoin::secp256k1::{Secp256k1, Signature};
+use bitcoin::secp256k1;
+
+use ln::msgs::DecodeError;
+use ln::PaymentPreimage;
+use ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
+use chain::chaininterface::{FeeEstimator, BroadcasterInterface};
+use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
+use chain::keysinterface::{Sign, KeysInterface};
+use chain::package::PackageTemplate;
+use util::logger::Logger;
+use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
+use util::byte_utils;
+
+use std::collections::HashMap;
+use core::cmp;
+use core::ops::Deref;
+use core::mem::replace;
+
+const MAX_ALLOC_SIZE: usize = 64*1024;
+
+/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
+/// transaction causing it.
+///
+/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
+#[derive(PartialEq)]
+struct OnchainEventEntry {
+ // Txid of the on-chain transaction that caused this event.
+ txid: Txid,
+ // Block height at which the transaction was observed confirmed.
+ height: u32,
+ // The event itself; see `OnchainEvent` for the possible actions to take on maturity.
+ event: OnchainEvent,
+}
+
+impl OnchainEventEntry {
+ /// Returns the block height at which this event has accumulated ANTI_REORG_DELAY
+ /// confirmations (the block containing the transaction counts as the first one).
+ fn confirmation_threshold(&self) -> u32 {
+ self.height + ANTI_REORG_DELAY - 1
+ }
+
+ /// True once `height` is at or past the confirmation threshold, i.e. the event
+ /// can be considered safe from a chain reorganization.
+ fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
+ height >= self.confirmation_threshold()
+ }
+}
+
+/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take actions on them
+/// once they mature to enough confirmations (ANTI_REORG_DELAY)
+#[derive(PartialEq)]
+enum OnchainEvent {
+ /// An outpoint under claim process by our own tx; once it gets enough confirmations, we remove it from
+ /// the bump-txn candidate buffer.
+ Claim {
+ claim_request: Txid,
+ },
+ /// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by a counterparty tx.
+ /// In this case, we need to drop the outpoint and regenerate a new claim tx. For safety, we keep tracking
+ /// the outpoint to be sure to resurrect it back into the claim tx if reorgs happen.
+ ContentiousOutpoint {
+ package: PackageTemplate,
+ }
+}
+
+// Deserialization counterpart of the `Writeable` impl below: a leading u8 flags
+// None (0) vs Some (1); a Some is followed by a u64 element count, then each
+// element carries its own presence byte and, when present, the index (as u64)
+// and the signature.
+impl Readable for Option<Vec<Option<(usize, Signature)>>> {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ match Readable::read(reader)? {
+ 0u8 => Ok(None),
+ 1u8 => {
+ let vlen: u64 = Readable::read(reader)?;
+ // Cap the pre-allocation by MAX_ALLOC_SIZE so a corrupt/hostile length
+ // field cannot force a huge up-front allocation; the Vec still grows as
+ // elements are actually read.
+ let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
+ for _ in 0..vlen {
+ ret.push(match Readable::read(reader)? {
+ 0u8 => None,
+ 1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
+ _ => return Err(DecodeError::InvalidValue)
+ });
+ }
+ Ok(Some(ret))
+ },
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+// Serialization counterpart of the `Readable` impl above: writes a leading u8
+// flagging None (0) vs Some (1); a Some is followed by the u64 element count,
+// then each element with its own presence byte and, when present, the index
+// (widened to u64 for a stable on-disk width) and the signature.
+impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &Some(ref vec) => {
+ 1u8.write(writer)?;
+ (vec.len() as u64).write(writer)?;
+ for opt in vec.iter() {
+ match opt {
+ &Some((ref idx, ref sig)) => {
+ 1u8.write(writer)?;
+ (*idx as u64).write(writer)?;
+ sig.write(writer)?;
+ },
+ &None => 0u8.write(writer)?,
+ }
+ }
+ },
+ &None => 0u8.write(writer)?,
+ }
+ Ok(())
+ }
+}
+
+
+/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts
+/// the claiming transactions and does RBF bumping if possible.
+pub struct OnchainTxHandler<ChannelSigner: Sign> {
+	// Script to which claimed funds are paid out.
+	destination_script: Script,
+	holder_commitment: HolderCommitmentTransaction,
+	// holder_htlc_sigs and prev_holder_htlc_sigs are in the order as they appear in the commitment
+	// transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
+	// the set of HTLCs in the HolderCommitmentTransaction.
+	holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
+	prev_holder_commitment: Option<HolderCommitmentTransaction>,
+	prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
+
+	pub(super) signer: ChannelSigner,
+	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
+
+	// Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
+	// it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
+	// another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in the flight for the
+	// same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
+	// block connection we scan all inputs and if any of them is among a set of a claiming request we test for set
+	// equality between the spending transaction and the claim request. If true, it means the transaction was one of
+	// our claiming ones; after a security delay of 6 blocks we remove the pending claim request. If false, it means
+	// the transaction wasn't and we need to regenerate a new claim request with the reduced set of still-claimable
+	// outpoints.
+	// Key is identifier of the pending claim request, i.e the txid of the initial claiming transaction generated by
+	// us and is immutable until all outpoint of the claimable set are post-anti-reorg-delay solved.
+	// Entry is cache of elements need to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
+	#[cfg(test)] // Used in functional_test to verify sanitization
+	pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
+	#[cfg(not(test))]
+	pending_claim_requests: HashMap<Txid, PackageTemplate>,
+
+	// Used to link outpoints claimed in a connected block to a pending claim request.
+	// Key is an outpoint that monitor parsing has detected we have keys/scripts to claim.
+	// Value is (pending claim request identifier, confirmation_block), identifier
+	// is txid of the initial claiming transaction and is immutable until outpoint is
+	// post-anti-reorg-delay solved, confirmation_block is used to erase entry if
+	// block with output gets disconnected.
+	#[cfg(test)] // Used in functional_test to verify sanitization
+	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
+	#[cfg(not(test))]
+	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
+
+	// Events (claims/contentious outpoints) waiting ANTI_REORG_DELAY confirmations before being acted on.
+	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
+
+	// Best block height seen so far; used as a fallback when callers pass no height.
+	latest_height: u32,
+
+	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
+}
+
+// Version bytes written by `OnchainTxHandler::write` and validated on read.
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
+impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
+	/// Serializes the handler. Field order must be kept in sync with the
+	/// `ReadableArgs` impl below.
+	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+
+		self.destination_script.write(writer)?;
+		self.holder_commitment.write(writer)?;
+		self.holder_htlc_sigs.write(writer)?;
+		self.prev_holder_commitment.write(writer)?;
+		self.prev_holder_htlc_sigs.write(writer)?;
+
+		self.channel_transaction_parameters.write(writer)?;
+
+		// The signer is serialized as an opaque, u32-length-prefixed blob.
+		let mut key_data = VecWriter(Vec::new());
+		self.signer.write(&mut key_data)?;
+		assert!(key_data.0.len() < core::usize::MAX);
+		assert!(key_data.0.len() < core::u32::MAX as usize);
+		(key_data.0.len() as u32).write(writer)?;
+		writer.write_all(&key_data.0[..])?;
+
+		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
+		for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
+			ancestor_claim_txid.write(writer)?;
+			request.write(writer)?;
+		}
+
+		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
+		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
+			outp.write(writer)?;
+			claim_and_height.0.write(writer)?;
+			claim_and_height.1.write(writer)?;
+		}
+
+		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
+		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
+			entry.txid.write(writer)?;
+			writer.write_all(&byte_utils::be32_to_array(entry.height))?;
+			// Event variants are tagged with a single discriminant byte.
+			match entry.event {
+				OnchainEvent::Claim { ref claim_request } => {
+					writer.write_all(&[0; 1])?;
+					claim_request.write(writer)?;
+				},
+				OnchainEvent::ContentiousOutpoint { ref package } => {
+					writer.write_all(&[1; 1])?;
+					package.write(writer)?;
+				}
+			}
+		}
+		self.latest_height.write(writer)?;
+
+		// No TLV fields yet; the empty set is written for forwards-compatibility.
+		write_tlv_fields!(writer, {}, {});
+		Ok(())
+	}
+}
+
+impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
+	/// Deserializes an `OnchainTxHandler` written by `write` above, using
+	/// `keys_manager` to reconstruct the channel signer and to re-randomize the
+	/// secp context. Field order must stay in sync with `write`.
+	fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
+		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
+
+		let destination_script = Readable::read(reader)?;
+
+		let holder_commitment = Readable::read(reader)?;
+		let holder_htlc_sigs = Readable::read(reader)?;
+		let prev_holder_commitment = Readable::read(reader)?;
+		let prev_holder_htlc_sigs = Readable::read(reader)?;
+
+		let channel_parameters = Readable::read(reader)?;
+
+		let keys_len: u32 = Readable::read(reader)?;
+		let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
+		while keys_data.len() != keys_len as usize {
+			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
+			let mut data = [0; 1024];
+			let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
+			reader.read_exact(read_slice)?;
+			keys_data.extend_from_slice(read_slice);
+		}
+		let signer = keys_manager.read_chan_signer(&keys_data)?;
+
+		let pending_claim_requests_len: u64 = Readable::read(reader)?;
+		let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
+		for _ in 0..pending_claim_requests_len {
+			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
+		}
+
+		let claimable_outpoints_len: u64 = Readable::read(reader)?;
+		// Size the map from its own length field (previously this mistakenly reused
+		// pending_claim_requests_len, producing a wrong capacity hint).
+		let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
+		for _ in 0..claimable_outpoints_len {
+			let outpoint = Readable::read(reader)?;
+			let ancestor_claim_txid = Readable::read(reader)?;
+			let height = Readable::read(reader)?;
+			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
+		}
+		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
+		let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
+		for _ in 0..waiting_threshold_conf_len {
+			let txid = Readable::read(reader)?;
+			let height = Readable::read(reader)?;
+			// Single discriminant byte written by `write` selects the event variant.
+			let event = match <u8 as Readable>::read(reader)? {
+				0 => {
+					let claim_request = Readable::read(reader)?;
+					OnchainEvent::Claim {
+						claim_request
+					}
+				},
+				1 => {
+					let package = Readable::read(reader)?;
+					OnchainEvent::ContentiousOutpoint {
+						package
+					}
+				}
+				_ => return Err(DecodeError::InvalidValue),
+			};
+			onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { txid, height, event });
+		}
+		let latest_height = Readable::read(reader)?;
+
+		read_tlv_fields!(reader, {}, {});
+
+		let mut secp_ctx = Secp256k1::new();
+		secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
+
+		Ok(OnchainTxHandler {
+			destination_script,
+			holder_commitment,
+			holder_htlc_sigs,
+			prev_holder_commitment,
+			prev_holder_htlc_sigs,
+			signer,
+			channel_transaction_parameters: channel_parameters,
+			claimable_outpoints,
+			pending_claim_requests,
+			onchain_events_awaiting_threshold_conf,
+			latest_height,
+			secp_ctx,
+		})
+	}
+}
+
+impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
+	/// Creates a fresh handler with no pending claim requests, no tracked outpoints and no
+	/// cached holder HTLC signatures.
+	pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
+		OnchainTxHandler {
+			destination_script,
+			holder_commitment,
+			holder_htlc_sigs: None,
+			prev_holder_commitment: None,
+			prev_holder_htlc_sigs: None,
+			signer,
+			channel_transaction_parameters: channel_parameters,
+			pending_claim_requests: HashMap::new(),
+			claimable_outpoints: HashMap::new(),
+			onchain_events_awaiting_threshold_conf: Vec::new(),
+			latest_height: 0,
+
+			secp_ctx,
+		}
+	}
+
+	/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize a counterparty
+	/// onchain) relies on the assumption of claim transactions getting confirmed before timelock
+	/// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may be stuck
+	/// in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or
+	/// Child-Pay-For-Parent.
+	///
+	/// Returns `(height timer, feerate, transaction)` on success; `None` if the request is empty
+	/// or no satisfying feerate could be computed.
+	///
+	/// Panics if there are signing errors, because signing operations in reaction to on-chain events
+	/// are not expected to fail, and if they do, we may lose funds.
+	fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
+		where F::Target: FeeEstimator,
+		      L::Target: Logger,
+	{
+		if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
+
+		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
+		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
+		let new_timer = Some(cached_request.get_height_timer(height));
+		let amt = cached_request.package_amount();
+		if cached_request.is_malleable() {
+			let predicted_weight = cached_request.package_weight(&self.destination_script);
+			if let Some((output_value, new_feerate)) = cached_request.compute_package_output(predicted_weight, amt, fee_estimator, logger) {
+				assert!(new_feerate != 0);
+
+				let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
+				log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
+				// The weight estimate must be an upper bound or the fee/feerate math above is wrong.
+				assert!(predicted_weight >= transaction.get_weight());
+				return Some((new_timer, new_feerate, transaction))
+			}
+		} else {
+			// Note: Currently, amounts of holder outputs spending witnesses aren't used
+			// as we can't malleate spending package to increase their feerate. This
+			// should change with the remaining anchor output patchset.
+			debug_assert!(amt == 0);
+			if let Some(transaction) = cached_request.finalize_package(self, amt, self.destination_script.clone(), logger) {
+				// Non-malleable packages (e.g. pre-signed txn) carry no timer/feerate of their own.
+				return Some((None, 0, transaction));
+			}
+		}
+		None
+	}
+
+	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
+	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
+	/// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
+	/// if we receive a preimage after force-close.
+	pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, latest_height: Option<u32>, broadcaster: &B, fee_estimator: &F, logger: &L)
+		where B::Target: BroadcasterInterface,
+		      F::Target: FeeEstimator,
+		      L::Target: Logger,
+	{
+		let height = match latest_height {
+			Some(h) => h,
+			None => self.latest_height,
+		};
+		log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
+		let mut preprocessed_requests = Vec::with_capacity(requests.len());
+		let mut aggregated_request = None;
+
+		// Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
+		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
+		for req in requests {
+			// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
+			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); } else {
+				log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
+				if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
+					// Don't aggregate if outpoint package timelock is soon or marked as non-aggregable
+					preprocessed_requests.push(req);
+				} else if aggregated_request.is_none() {
+					aggregated_request = Some(req);
+				} else {
+					aggregated_request.as_mut().unwrap().merge_package(req);
+				}
+			}
+		}
+		if let Some(req) = aggregated_request {
+			preprocessed_requests.push(req);
+		}
+
+		// Generate claim transactions and track them to bump if necessary at
+		// height timer expiration (i.e in how many blocks we're going to take action).
+		for mut req in preprocessed_requests {
+			if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) {
+				req.set_timer(new_timer);
+				req.set_feerate(new_feerate);
+				let txid = tx.txid();
+				for k in req.outpoints() {
+					log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
+					self.claimable_outpoints.insert(k.clone(), (txid, height));
+				}
+				self.pending_claim_requests.insert(txid, req);
+				log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
+				broadcaster.broadcast_transaction(&tx);
+			}
+		}
+
+		let mut bump_candidates = HashMap::new();
+		for tx in txn_matched {
+			// Scan all inputs to check whether one of the spent outpoints is of interest to us
+			let mut claimed_outputs_material = Vec::new();
+			for inp in &tx.input {
+				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
+					// If outpoint has claim request pending on it...
+					if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
+						//... we need to verify equality between transaction outpoints and claim request
+						// outpoints to know if transaction is the original claim or a bumped one issued
+						// by us.
+						let mut set_equality = true;
+						if request.outpoints().len() != tx.input.len() {
+							set_equality = false;
+						} else {
+							for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
+								if **claim_inp != tx_inp.previous_output {
+									set_equality = false;
+								}
+							}
+						}
+
+						// Queue removal of the claim request after ANTI_REORG_DELAY, deduplicating entries.
+						macro_rules! clean_claim_request_after_safety_delay {
+							() => {
+								let entry = OnchainEventEntry {
+									txid: tx.txid(),
+									height,
+									event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
+								};
+								if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
+									self.onchain_events_awaiting_threshold_conf.push(entry);
+								}
+							}
+						}
+
+						// If this is our transaction (or our counterparty spent all the outputs
+						// before we could anyway with same inputs order than us), wait for
+						// ANTI_REORG_DELAY and clean the RBF tracking map.
+						if set_equality {
+							clean_claim_request_after_safety_delay!();
+						} else { // If false, generate new claim request with updated outpoint set
+							let mut at_least_one_drop = false;
+							for input in tx.input.iter() {
+								if let Some(package) = request.split_package(&input.previous_output) {
+									claimed_outputs_material.push(package);
+									at_least_one_drop = true;
+								}
+								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
+								if request.outpoints().is_empty() {
+									clean_claim_request_after_safety_delay!();
+								}
+							}
+							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
+							if at_least_one_drop {
+								bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
+							}
+						}
+						break; //No need to iterate further, either tx is ours or theirs
+					} else {
+						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
+					}
+				}
+			}
+			// Record split-off packages as contentious outpoints awaiting the anti-reorg delay.
+			for package in claimed_outputs_material.drain(..) {
+				let entry = OnchainEventEntry {
+					txid: tx.txid(),
+					height,
+					event: OnchainEvent::ContentiousOutpoint { package },
+				};
+				if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
+					self.onchain_events_awaiting_threshold_conf.push(entry);
+				}
+			}
+		}
+
+		// After security delay, either our claim tx got enough confs or outpoint is definitely out of reach
+		let onchain_events_awaiting_threshold_conf =
+			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
+		for entry in onchain_events_awaiting_threshold_conf {
+			if entry.has_reached_confirmation_threshold(height) {
+				match entry.event {
+					OnchainEvent::Claim { claim_request } => {
+						// We may remove a whole set of claim outpoints here, as these one may have
+						// been aggregated in a single tx and claimed so atomically
+						if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
+							for outpoint in request.outpoints() {
+								self.claimable_outpoints.remove(&outpoint);
+							}
+						}
+					},
+					OnchainEvent::ContentiousOutpoint { package } => {
+						self.claimable_outpoints.remove(&package.outpoints()[0]);
+					}
+				}
+			} else {
+				self.onchain_events_awaiting_threshold_conf.push(entry);
+			}
+		}
+
+		// Check if any pending claim request must be rescheduled
+		for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
+			if let Some(h) = request.timer() {
+				if height >= h {
+					bump_candidates.insert(*first_claim_txid, (*request).clone());
+				}
+			}
+		}
+
+		// Build, bump and rebroadcast tx accordingly
+		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
+		for (first_claim_txid, request) in bump_candidates.iter() {
+			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
+				log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+				broadcaster.broadcast_transaction(&bump_tx);
+				if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
+					request.set_timer(new_timer);
+					request.set_feerate(new_feerate);
+				}
+			}
+		}
+	}
+
+	/// Called when a previously-confirmed transaction is reorged out. If `txid` has a
+	/// pending threshold-confirmation event, this is handled as a disconnect of the block
+	/// it was recorded at (via `block_disconnected`).
+	pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
+		&mut self,
+		txid: &Txid,
+		broadcaster: B,
+		fee_estimator: F,
+		logger: L,
+	) where
+		B::Target: BroadcasterInterface,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
+	{
+		// Find the height at which we recorded this txid, if any.
+		let mut height = None;
+		for entry in self.onchain_events_awaiting_threshold_conf.iter() {
+			if entry.txid == *txid {
+				height = Some(entry.height);
+				break;
+			}
+		}
+
+		if let Some(height) = height {
+			self.block_disconnected(height, broadcaster, fee_estimator, logger);
+		}
+	}
+
+	/// Handles a block disconnection: events recorded at or above `height` are undone,
+	/// split-off packages are merged back into their parent claim requests, bumped claim
+	/// transactions are rebroadcast, and outpoints first claimed at or above `height` are
+	/// dropped along with their whole pending request.
+	pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+		where B::Target: BroadcasterInterface,
+		      F::Target: FeeEstimator,
+		      L::Target: Logger,
+	{
+		let mut bump_candidates = HashMap::new();
+		let onchain_events_awaiting_threshold_conf =
+			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
+		for entry in onchain_events_awaiting_threshold_conf {
+			if entry.height >= height {
+				//- our claim tx on a commitment tx output
+				//- resurrect outpoint back in its claimable set and regenerate tx
+				match entry.event {
+					OnchainEvent::ContentiousOutpoint { package } => {
+						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
+							if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+								request.merge_package(package);
+								// Using a HashMap guarantees that if we have multiple outpoints getting
+								// resurrected only one bump claim tx is going to be broadcast
+								bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
+							}
+						}
+					},
+					_ => {},
+				}
+			} else {
+				self.onchain_events_awaiting_threshold_conf.push(entry);
+			}
+		}
+		for (_, request) in bump_candidates.iter_mut() {
+			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &&*fee_estimator, &&*logger) {
+				request.set_timer(new_timer);
+				request.set_feerate(new_feerate);
+				log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+				broadcaster.broadcast_transaction(&bump_tx);
+			}
+		}
+		// Persist the refreshed requests back into the pending map.
+		for (ancestor_claim_txid, request) in bump_candidates.drain() {
+			self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
+		}
+		//TODO: if we implement cross-block aggregated claim transaction we need to refresh set of outpoints and regenerate tx but
+		// right now if one of the outpoints gets disconnected, just erase whole pending claim request.
+		let mut remove_request = Vec::new();
+		self.claimable_outpoints.retain(|_, ref v|
+			if v.1 >= height {
+				remove_request.push(v.0.clone());
+				false
+			} else { true });
+		for req in remove_request {
+			self.pending_claim_requests.remove(&req);
+		}
+	}
+
+	/// Returns the sorted, deduplicated txids of all transactions currently awaiting
+	/// threshold confirmation.
+	pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
+		let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
+			.iter()
+			.map(|entry| entry.txid)
+			.collect();
+		txids.sort_unstable();
+		txids.dedup();
+		txids
+	}
+
+	/// Replaces the cached holder commitment with `tx`, keeping the previous one around
+	/// (a replica may still broadcast it) and invalidating the cached HTLC signatures.
+	pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
+		self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
+		self.holder_htlc_sigs = None;
+	}
+
+	// Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
+	// in some configurations, the holder commitment tx has been signed and broadcast by a
+	// ChannelMonitor replica, so we handle that case here.
+	fn sign_latest_holder_htlcs(&mut self) {
+		// Idempotent: only signs if the cache is empty.
+		if self.holder_htlc_sigs.is_none() {
+			let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
+			self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
+		}
+	}
+
+	// Normally only the latest commitment tx and HTLCs need to be signed. However, in some
+	// configurations we may have updated our holder commitment but a replica of the ChannelMonitor
+	// broadcast the previous one before we sync with it. We handle that case here.
+	fn sign_prev_holder_htlcs(&mut self) {
+		// Idempotent, and a no-op if there is no previous holder commitment cached.
+		if self.prev_holder_htlc_sigs.is_none() {
+			if let Some(ref holder_commitment) = self.prev_holder_commitment {
+				let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
+				self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
+			}
+		}
+	}
+
+	/// Reorders `sigs` (which is in HTLC-set order) into a Vec indexed by commitment
+	/// transaction output index; gaps (non-HTLC outputs) are left as `None`. Each entry
+	/// carries the HTLC-set index alongside the signature.
+	fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
+		let mut ret = Vec::new();
+		for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
+			let tx_idx = htlc.transaction_output_index.unwrap();
+			// Grow the vec on demand so it is exactly as long as the highest output index used.
+			if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
+			ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
+		}
+		ret
+	}
+
+	//TODO: getting latest holder transactions should be infallible and result in us "force-closing the channel", but we may
+	// have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
+	// before providing an initial commitment transaction. For outbound channel, init ChannelMonitor at Channel::funding_signed, there is nothing
+	// to monitor before.
+	/// Signs and returns the fully-signed holder commitment transaction, caching the
+	/// freshly-produced HTLC signatures as a side effect.
+	pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
+		let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
+		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
+		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
+	}
+
+	// Test-only variant of the above which signs via the unsafe signer path (allows
+	// re-signing a revoked state); used to exercise revoked-tx handling in tests.
+	#[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
+	pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
+		let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
+		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
+		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
+	}
+
+	/// Builds a fully-signed second-stage HTLC transaction spending `outp` of either the
+	/// current or previous holder commitment; `preimage` is included in the witness for a
+	/// claim, `None` for a timeout. Returns `None` if `outp` matches neither commitment.
+	pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+		let mut htlc_tx = None;
+		let commitment_txid = self.holder_commitment.trust().txid();
+		// Check if the HTLC spends from the current holder commitment
+		if commitment_txid == outp.txid {
+			self.sign_latest_holder_htlcs();
+			if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
+				// holder_htlc_sigs is indexed by commitment tx output index (see extract_holder_sigs).
+				let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
+				let trusted_tx = self.holder_commitment.trust();
+				let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
+				htlc_tx = Some(trusted_tx
+					.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
+			}
+		}
+		// If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
+		if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
+			let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
+			if commitment_txid == outp.txid {
+				self.sign_prev_holder_htlcs();
+				if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
+					let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
+					let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
+					let trusted_tx = holder_commitment.trust();
+					let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
+					htlc_tx = Some(trusted_tx
+						.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
+				}
+			}
+		}
+		htlc_tx
+	}
+
+	// Test-only wrapper around get_fully_signed_htlc_tx which restores the signature
+	// caches to their prior (empty) state afterwards, so signing a revoked state in a
+	// test doesn't pollute the caches.
+	#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+	pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+		let latest_had_sigs = self.holder_htlc_sigs.is_some();
+		let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
+		let ret = self.get_fully_signed_htlc_tx(outp, preimage);
+		if !latest_had_sigs {
+			self.holder_htlc_sigs = None;
+		}
+		if !prev_had_sigs {
+			self.prev_holder_htlc_sigs = None;
+		}
+		ret
+	}
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Various utilities to assemble claimable outpoints into packages of one or more
+//! transactions. Each package carries metadata guiding its aggregation and fee-bumping
+//! re-scheduling. This file also includes witness weight computation and fee computation methods.
+
+use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
+use bitcoin::blockdata::transaction::{TxOut,TxIn, Transaction, SigHashType};
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::script::Script;
+
+use bitcoin::hash_types::Txid;
+
+use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+
+use ln::PaymentPreimage;
+use ln::chan_utils::{TxCreationKeys, HTLCOutputInCommitment, HTLC_OUTPUT_IN_COMMITMENT_SIZE};
+use ln::chan_utils;
+use ln::msgs::DecodeError;
+use chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
+use chain::keysinterface::Sign;
+use chain::onchaintx::OnchainTxHandler;
+use util::byte_utils;
+use util::logger::Logger;
+use util::ser::{Readable, Writer, Writeable};
+
+use std::cmp;
+use std::mem;
+use std::ops::Deref;
+
+const MAX_ALLOC_SIZE: usize = 64*1024;
+
+
+// Witness weights (in weight units) for each claimable output type, itemized element by element.
+// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+pub(crate) const WEIGHT_REVOKED_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 133;
+// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+pub(crate) const WEIGHT_REVOKED_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 139;
+// number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script
+pub(crate) const WEIGHT_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 32 + 1 + 133;
+// number_of_witness_elements + sig_length + counterpartyhtlc_sig + empty_vec_length + empty_vec + witness_script_length + witness_script
+pub(crate) const WEIGHT_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 139;
+// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
+pub(crate) const WEIGHT_REVOKED_OUTPUT: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 77;
+
+// Rebroadcast/fee-bump schedule, expressed in blocks.
+/// Height delay at which transactions are fee-bumped/rebroadcasted with a low priority.
+const LOW_FREQUENCY_BUMP_INTERVAL: u32 = 15;
+/// Height delay at which transactions are fee-bumped/rebroadcasted with a middle priority.
+const MIDDLE_FREQUENCY_BUMP_INTERVAL: u32 = 3;
+/// Height delay at which transactions are fee-bumped/rebroadcasted with a high priority.
+const HIGH_FREQUENCY_BUMP_INTERVAL: u32 = 1;
+
+/// A struct to describe a revoked output and corresponding information to generate a solving
+/// witness spending a commitment `to_local` output or a second-stage HTLC transaction output.
+///
+/// CSV and pubkeys are used as part of a witnessScript redeeming a balance output, amount is used
+/// as part of the signature hash and revocation secret to generate a satisfying witness.
+#[derive(Clone, PartialEq)]
+pub(crate) struct RevokedOutput {
+	per_commitment_point: PublicKey,
+	counterparty_delayed_payment_base_key: PublicKey,
+	counterparty_htlc_base_key: PublicKey,
+	per_commitment_key: SecretKey,
+	weight: u64,
+	amount: u64,
+	on_counterparty_tx_csv: u16,
+}
+
+impl RevokedOutput {
+	/// Captures the keys/amount needed to claim a revoked balance output; the witness
+	/// weight is always WEIGHT_REVOKED_OUTPUT.
+	pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16) -> Self {
+		RevokedOutput {
+			per_commitment_point,
+			counterparty_delayed_payment_base_key,
+			counterparty_htlc_base_key,
+			per_commitment_key,
+			weight: WEIGHT_REVOKED_OUTPUT,
+			amount,
+			on_counterparty_tx_csv
+		}
+	}
+}
+
+// Serialized size hint: three 33-byte pubkeys, a 32-byte secret key, two u64s and a u16.
+impl_writeable!(RevokedOutput, 33*3 + 32 + 8 + 8 + 2, {
+	per_commitment_point,
+	counterparty_delayed_payment_base_key,
+	counterparty_htlc_base_key,
+	per_commitment_key,
+	weight,
+	amount,
+	on_counterparty_tx_csv
+});
+
+/// A struct to describe a revoked HTLC output (offered or received) and corresponding
+/// information to generate a solving witness.
+///
+/// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a suitable
+/// witnessScript.
+///
+/// CSV is used as part of a witnessScript redeeming a balance output, amount is used as part
+/// of the signature hash and revocation secret to generate a satisfying witness.
+#[derive(Clone, PartialEq)]
+pub(crate) struct RevokedHTLCOutput {
+	per_commitment_point: PublicKey,
+	counterparty_delayed_payment_base_key: PublicKey,
+	counterparty_htlc_base_key: PublicKey,
+	per_commitment_key: SecretKey,
+	weight: u64,
+	amount: u64,
+	htlc: HTLCOutputInCommitment,
+}
+
+impl RevokedHTLCOutput {
+	pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment) -> Self {
+		// Witness weight depends on the HTLC direction (offered vs received script sizes differ).
+		let weight = if htlc.offered { WEIGHT_REVOKED_OFFERED_HTLC } else { WEIGHT_REVOKED_RECEIVED_HTLC };
+		RevokedHTLCOutput {
+			per_commitment_point,
+			counterparty_delayed_payment_base_key,
+			counterparty_htlc_base_key,
+			per_commitment_key,
+			weight,
+			amount,
+			htlc
+		}
+	}
+}
+
+impl_writeable!(RevokedHTLCOutput, 33*3 + 32 + 8 + 8 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
+	per_commitment_point,
+	counterparty_delayed_payment_base_key,
+	counterparty_htlc_base_key,
+	per_commitment_key,
+	weight,
+	amount,
+	htlc
+});
+
+/// A struct to describe a HTLC output on a counterparty commitment transaction.
+///
+/// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a suitable
+/// witnessScript.
+///
+/// The preimage is used as part of the witness.
+#[derive(Clone, PartialEq)]
+pub(crate) struct CounterpartyOfferedHTLCOutput {
+	per_commitment_point: PublicKey,
+	counterparty_delayed_payment_base_key: PublicKey,
+	counterparty_htlc_base_key: PublicKey,
+	preimage: PaymentPreimage,
+	htlc: HTLCOutputInCommitment
+}
+
+impl CounterpartyOfferedHTLCOutput {
+	pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment) -> Self {
+		CounterpartyOfferedHTLCOutput {
+			per_commitment_point,
+			counterparty_delayed_payment_base_key,
+			counterparty_htlc_base_key,
+			preimage,
+			htlc
+		}
+	}
+}
+
+impl_writeable!(CounterpartyOfferedHTLCOutput, 33*3 + 32 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
+	per_commitment_point,
+	counterparty_delayed_payment_base_key,
+	counterparty_htlc_base_key,
+	preimage,
+	htlc
+});
+
+/// A struct to describe a HTLC output on a counterparty commitment transaction.
+///
+/// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a
+/// suitable witnessScript.
+///
+/// No preimage is stored: this output is claimed on the timeout path, after the HTLC's
+/// cltv_expiry (see `PackageSolvingData::finalize_input`, which sets the tx lock_time).
+#[derive(Clone, PartialEq)]
+pub(crate) struct CounterpartyReceivedHTLCOutput {
+	per_commitment_point: PublicKey,
+	counterparty_delayed_payment_base_key: PublicKey,
+	counterparty_htlc_base_key: PublicKey,
+	htlc: HTLCOutputInCommitment
+}
+
+impl CounterpartyReceivedHTLCOutput {
+	pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, htlc: HTLCOutputInCommitment) -> Self {
+		CounterpartyReceivedHTLCOutput {
+			per_commitment_point,
+			counterparty_delayed_payment_base_key,
+			counterparty_htlc_base_key,
+			htlc
+		}
+	}
+}
+
+// (De)serialization: fields in declared order.
+impl_writeable!(CounterpartyReceivedHTLCOutput, 33*3 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
+	per_commitment_point,
+	counterparty_delayed_payment_base_key,
+	counterparty_htlc_base_key,
+	htlc
+});
+
+/// A struct to describe a HTLC output on holder commitment transaction.
+///
+/// Either offered or received, the amount is always used as part of the bip143 sighash.
+/// Preimage is only included as part of the witness in former case.
+#[derive(Clone, PartialEq)]
+pub(crate) struct HolderHTLCOutput {
+	// Some(_) for a received HTLC we can claim via preimage, None for the timeout path.
+	preimage: Option<PaymentPreimage>,
+	amount: u64,
+}
+
+impl HolderHTLCOutput {
+	pub(crate) fn build(preimage: Option<PaymentPreimage>, amount: u64) -> Self {
+		HolderHTLCOutput {
+			preimage,
+			amount
+		}
+	}
+}
+
+// (De)serialization: fields in declared order; minimum length is 0 as the preimage is optional.
+impl_writeable!(HolderHTLCOutput, 0, {
+	preimage,
+	amount
+});
+
+/// A struct to describe the channel output on the funding transaction.
+///
+/// witnessScript is used as part of the witness redeeming the funding utxo.
+#[derive(Clone, PartialEq)]
+pub(crate) struct HolderFundingOutput {
+	funding_redeemscript: Script,
+}
+
+impl HolderFundingOutput {
+	pub(crate) fn build(funding_redeemscript: Script) -> Self {
+		HolderFundingOutput {
+			funding_redeemscript,
+		}
+	}
+}
+
+// (De)serialization: the variable-length redeemscript is the only field.
+impl_writeable!(HolderFundingOutput, 0, {
+	funding_redeemscript
+});
+
+/// A wrapper encapsulating all in-protocol differing output types.
+///
+/// The generic API offers access to an output's common attributes or allows transformation
+/// such as finalizing an input claiming the output.
+#[derive(Clone, PartialEq)]
+pub(crate) enum PackageSolvingData {
+	// Revoked-commitment claims: malleable, and aggregable with each other.
+	RevokedOutput(RevokedOutput),
+	RevokedHTLCOutput(RevokedHTLCOutput),
+	// HTLC claims on a counterparty commitment: malleable.
+	CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput),
+	CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput),
+	// Claims via pre-signed holder transactions: untractable (signatures are cached).
+	HolderHTLCOutput(HolderHTLCOutput),
+	HolderFundingOutput(HolderFundingOutput),
+}
+
+impl PackageSolvingData {
+	/// Claimed output value in satoshis, used to size the package's spendable amount.
+	fn amount(&self) -> u64 {
+		let amt = match self {
+			PackageSolvingData::RevokedOutput(ref outp) => { outp.amount },
+			PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.amount },
+			PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
+			PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
+			// Note: Currently, amounts of holder outputs spending witnesses aren't used
+			// as we can't malleate spending package to increase their feerate. This
+			// should change with the remaining anchor output patchset.
+			PackageSolvingData::HolderHTLCOutput(..) => { 0 },
+			PackageSolvingData::HolderFundingOutput(..) => { 0 },
+		};
+		amt
+	}
+	/// Expected claim-input weight, used for fee estimation of malleable packages.
+	fn weight(&self) -> usize {
+		let weight = match self {
+			PackageSolvingData::RevokedOutput(ref outp) => { outp.weight as usize },
+			PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.weight as usize },
+			PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { WEIGHT_OFFERED_HTLC as usize },
+			PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { WEIGHT_RECEIVED_HTLC as usize },
+			// Note: Currently, weights of holder outputs spending witnesses aren't used
+			// as we can't malleate spending package to increase their feerate. This
+			// should change with the remaining anchor output patchset.
+			// Calling weight() on an untractable package is a caller bug, hence the debug_assert.
+			PackageSolvingData::HolderHTLCOutput(..) => { debug_assert!(false); 0 },
+			PackageSolvingData::HolderFundingOutput(..) => { debug_assert!(false); 0 },
+		};
+		weight
+	}
+	/// Whether `input` may be aggregated with us in one claiming transaction.
+	/// Revoked balance and revoked HTLC outputs may be mixed together; every other type
+	/// only aggregates with its own variant.
+	fn is_compatible(&self, input: &PackageSolvingData) -> bool {
+		match self {
+			PackageSolvingData::RevokedOutput(..) => {
+				match input {
+					PackageSolvingData::RevokedHTLCOutput(..) => { true },
+					PackageSolvingData::RevokedOutput(..) => { true },
+					_ => { false }
+				}
+			},
+			PackageSolvingData::RevokedHTLCOutput(..) => {
+				match input {
+					PackageSolvingData::RevokedOutput(..) => { true },
+					PackageSolvingData::RevokedHTLCOutput(..) => { true },
+					_ => { false }
+				}
+			},
+			_ => { mem::discriminant(self) == mem::discriminant(&input) }
+		}
+	}
+	/// Derives keys and a witnessScript for the claimed output, signs input `i` of
+	/// `bumped_tx` and fills in its witness. Returns false if the signer refused to sign.
+	/// Only valid for malleable variants; holder variants must use `get_finalized_tx`.
+	fn finalize_input<Signer: Sign>(&self, bumped_tx: &mut Transaction, i: usize, onchain_handler: &mut OnchainTxHandler<Signer>) -> bool {
+		match self {
+			PackageSolvingData::RevokedOutput(ref outp) => {
+				if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+					let witness_script = chan_utils::get_revokeable_redeemscript(&chan_keys.revocation_key, outp.on_counterparty_tx_csv, &chan_keys.broadcaster_delayed_payment_key);
+					//TODO: should we panic on signer failure ?
+					if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_output(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &onchain_handler.secp_ctx) {
+						bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+						bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+						// Non-empty element selects the revocation branch of the script.
+						bumped_tx.input[i].witness.push(vec!(1));
+						bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+					} else { return false; }
+				}
+			},
+			PackageSolvingData::RevokedHTLCOutput(ref outp) => {
+				if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+					let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
+					//TODO: should we panic on signer failure ?
+					if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_htlc(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &outp.htlc, &onchain_handler.secp_ctx) {
+						bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+						bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+						bumped_tx.input[i].witness.push(chan_keys.revocation_key.clone().serialize().to_vec());
+						bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+					} else { return false; }
+				}
+			},
+			PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => {
+				if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+					let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
+
+					// NOTE(review): unlike the revoked arms above, a signer failure here does not
+					// `return false` — the input is left with an empty witness yet we report
+					// success. Confirm whether this asymmetry is intentional.
+					if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
+						bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+						bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+						bumped_tx.input[i].witness.push(outp.preimage.0.to_vec());
+						bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+					}
+				}
+			},
+			PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => {
+				if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+					let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
+
+					bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
+					if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
+						bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+						bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+						// Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
+						bumped_tx.input[i].witness.push(vec![]);
+						bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+					}
+				}
+			},
+			_ => { panic!("API Error!"); }
+		}
+		true
+	}
+	/// Returns the pre-signed transaction claiming `outpoint` for the untractable holder
+	/// variants. Panics if called on a malleable variant (use `finalize_input` instead).
+	fn get_finalized_tx<Signer: Sign>(&self, outpoint: &BitcoinOutPoint, onchain_handler: &mut OnchainTxHandler<Signer>) -> Option<Transaction> {
+		match self {
+			PackageSolvingData::HolderHTLCOutput(ref outp) => { return onchain_handler.get_fully_signed_htlc_tx(outpoint, &outp.preimage); }
+			PackageSolvingData::HolderFundingOutput(ref outp) => { return Some(onchain_handler.get_fully_signed_holder_tx(&outp.funding_redeemscript)); }
+			_ => { panic!("API Error!"); }
+		}
+	}
+}
+
+impl Writeable for PackageSolvingData {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		// Serialize a one-byte variant discriminant, then the variant's payload.
+		match self {
+			PackageSolvingData::RevokedOutput(ref outp) => { 0u8.write(writer)?; outp.write(writer)?; },
+			PackageSolvingData::RevokedHTLCOutput(ref outp) => { 1u8.write(writer)?; outp.write(writer)?; },
+			PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { 2u8.write(writer)?; outp.write(writer)?; },
+			PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { 3u8.write(writer)?; outp.write(writer)?; },
+			PackageSolvingData::HolderHTLCOutput(ref outp) => { 4u8.write(writer)?; outp.write(writer)?; },
+			PackageSolvingData::HolderFundingOutput(ref outp) => { 5u8.write(writer)?; outp.write(writer)?; },
+		}
+		Ok(())
+	}
+}
+
+impl Readable for PackageSolvingData {
+	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		// A one-byte discriminant selects the variant whose payload follows; unknown
+		// discriminants are rejected as written by a newer version.
+		Ok(match <u8 as Readable>::read(reader)? {
+			0 => PackageSolvingData::RevokedOutput(Readable::read(reader)?),
+			1 => PackageSolvingData::RevokedHTLCOutput(Readable::read(reader)?),
+			2 => PackageSolvingData::CounterpartyOfferedHTLCOutput(Readable::read(reader)?),
+			3 => PackageSolvingData::CounterpartyReceivedHTLCOutput(Readable::read(reader)?),
+			4 => PackageSolvingData::HolderHTLCOutput(Readable::read(reader)?),
+			5 => PackageSolvingData::HolderFundingOutput(Readable::read(reader)?),
+			_ => return Err(DecodeError::UnknownVersion),
+		})
+	}
+}
+
+/// A malleable package might be aggregated with other packages to save on fees.
+/// An untractable package has been counter-signed and aggregating it would break cached
+/// counterparty signatures.
+#[derive(Clone, PartialEq)]
+pub(crate) enum PackageMalleability {
+	Malleable,
+	Untractable,
+}
+
+/// A structure to describe a package content that is generated by ChannelMonitor and
+/// used by OnchainTxHandler to generate and broadcast transactions settling onchain claims.
+///
+/// A package is defined as one or more transactions claiming onchain outputs in reaction
+/// to confirmation of a channel transaction. Those packages might be aggregated to save on
+/// fees, if satisfaction of outputs's witnessScript lets us do so.
+///
+/// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals.
+/// Failing to confirm a package translates to a loss of funds for the user.
+#[derive(Clone, PartialEq)]
+pub struct PackageTemplate {
+	// List of onchain outputs and solving data to generate satisfying witnesses.
+	inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>,
+	// Packages are deemed as malleable if we have local knowledge of at least one set of
+	// private keys yielding a satisfying witness. Malleability implies that we can aggregate
+	// packages among them to save on fees or rely on RBF to bump their feerates.
+	// Untractable packages have been counter-signed and thus imply that we can't aggregate
+	// them without breaking signatures. Fee-bumping strategy will also rely on CPFP.
+	malleability: PackageMalleability,
+	// Block height after which the earlier-output belonging to this package is mature for a
+	// competing claim by the counterparty. As our chain tip becomes nearer from the timelock,
+	// the fee-bumping frequency will increase. See `OnchainTxHandler::get_height_timer`.
+	soonest_conf_deadline: u32,
+	// Determines if this package can be aggregated.
+	// Timelocked outputs belonging to the same transaction might have differing
+	// satisfying heights. Picking up the later height among the output set would be a valid
+	// aggregable strategy but it comes with at least 2 trade-offs :
+	// * earlier-output fund are going to take longer to come back
+	// * CLTV delta backing up a corresponding HTLC on an upstream channel could be swallowed
+	// by the requirement of the later-output part of the set
+	// For now, we mark such timelocked outputs as non-aggregable, though we might introduce
+	// smarter aggregable strategy in the future.
+	aggregable: bool,
+	// Cache of package feerate committed at previous (re)broadcast. If bumping resources
+	// (either claimed output value or external utxo), it will keep increasing until holder
+	// or counterparty successful claim.
+	feerate_previous: u64,
+	// Cache of next height at which fee-bumping and rebroadcast will be attempted. In
+	// the future, we might abstract it to an observed mempool fluctuation.
+	height_timer: Option<u32>,
+	// Confirmation height of the claimed outputs set transaction. In case of reorg reaching
+	// it, we wipe out and forget the package.
+	height_original: u32,
+}
+
+impl PackageTemplate {
+	pub(crate) fn is_malleable(&self) -> bool {
+		self.malleability == PackageMalleability::Malleable
+	}
+	/// Earliest height at which a counterparty may competitively claim one of our outputs.
+	pub(crate) fn timelock(&self) -> u32 {
+		self.soonest_conf_deadline
+	}
+	pub(crate) fn aggregable(&self) -> bool {
+		self.aggregable
+	}
+	pub(crate) fn set_feerate(&mut self, new_feerate: u64) {
+		self.feerate_previous = new_feerate;
+	}
+	pub(crate) fn timer(&self) -> Option<u32> {
+		if let Some(ref timer) = self.height_timer {
+			return Some(*timer);
+		}
+		None
+	}
+	pub(crate) fn set_timer(&mut self, new_timer: Option<u32>) {
+		self.height_timer = new_timer;
+	}
+	pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> {
+		self.inputs.iter().map(|(o, _)| o).collect()
+	}
+	/// Removes `split_outp` from a malleable package and returns it wrapped in a fresh
+	/// single-input package inheriting our metadata. Returns None for untractable packages
+	/// or when the outpoint isn't ours.
+	pub(crate) fn split_package(&mut self, split_outp: &BitcoinOutPoint) -> Option<PackageTemplate> {
+		match self.malleability {
+			PackageMalleability::Malleable => {
+				let mut split_package = None;
+				let timelock = self.soonest_conf_deadline;
+				let aggregable = self.aggregable;
+				let feerate_previous = self.feerate_previous;
+				let height_timer = self.height_timer;
+				let height_original = self.height_original;
+				self.inputs.retain(|outp| {
+					if *split_outp == outp.0 {
+						split_package = Some(PackageTemplate {
+							inputs: vec![(outp.0, outp.1.clone())],
+							malleability: PackageMalleability::Malleable,
+							soonest_conf_deadline: timelock,
+							aggregable,
+							feerate_previous,
+							height_timer,
+							height_original,
+						});
+						return false;
+					}
+					return true;
+				});
+				return split_package;
+			},
+			_ => {
+				// Note, we may try to split on remote transaction for
+				// which we don't have a competing one (HTLC-Success before
+				// timelock expiration). This explain we don't panic!
+				// We should refactor OnchainTxHandler::block_connected to
+				// only test equality on competing claims.
+				return None;
+			}
+		}
+	}
+	/// Absorbs `merge_from`'s inputs into this package. Both packages must be malleable,
+	/// aggregable, from the same original height, and of compatible solving-data types;
+	/// any violation is a caller bug and panics.
+	pub(crate) fn merge_package(&mut self, mut merge_from: PackageTemplate) {
+		assert_eq!(self.height_original, merge_from.height_original);
+		if self.malleability == PackageMalleability::Untractable || merge_from.malleability == PackageMalleability::Untractable {
+			panic!("Merging template on untractable packages");
+		}
+		if !self.aggregable || !merge_from.aggregable {
+			panic!("Merging non aggregatable packages");
+		}
+		if let Some((_, lead_input)) = self.inputs.first() {
+			for (_, v) in merge_from.inputs.iter() {
+				if !lead_input.is_compatible(v) { panic!("Merging outputs from differing types !"); }
+			}
+		} else { panic!("Merging template on an empty package"); }
+		for (k, v) in merge_from.inputs.drain(..) {
+			self.inputs.push((k, v));
+		}
+		//TODO: verify coverage and sanity?
+		// Keep the earliest deadline of the two, so bumping urgency never decreases.
+		if self.soonest_conf_deadline > merge_from.soonest_conf_deadline {
+			self.soonest_conf_deadline = merge_from.soonest_conf_deadline;
+		}
+		// NOTE(review): this keeps the *lower* of the two cached previous feerates —
+		// confirm this is intentional (conservative base for the next feerate bump).
+		if self.feerate_previous > merge_from.feerate_previous {
+			self.feerate_previous = merge_from.feerate_previous;
+		}
+		self.height_timer = cmp::min(self.height_timer, merge_from.height_timer);
+	}
+	/// Total claimable value in satoshis across all inputs (holder outputs count as 0).
+	pub(crate) fn package_amount(&self) -> u64 {
+		let mut amounts = 0;
+		for (_, outp) in self.inputs.iter() {
+			amounts += outp.amount();
+		}
+		amounts
+	}
+	/// Predicted weight of the single-output claiming transaction for this package.
+	pub(crate) fn package_weight(&self, destination_script: &Script) -> usize {
+		let mut inputs_weight = 0;
+		let mut witnesses_weight = 2; // count segwit flags
+		for (_, outp) in self.inputs.iter() {
+			// previous_out_point: 36 bytes ; var_int: 1 byte ; sequence: 4 bytes
+			inputs_weight += 41 * WITNESS_SCALE_FACTOR;
+			witnesses_weight += outp.weight();
+		}
+		// version: 4 bytes ; count_tx_in: 1 byte ; count_tx_out: 1 byte ; lock_time: 4 bytes
+		let transaction_weight = 10 * WITNESS_SCALE_FACTOR;
+		// value: 8 bytes ; var_int: 1 byte ; pk_script: `destination_script.len()`
+		let output_weight = (8 + 1 + destination_script.len()) * WITNESS_SCALE_FACTOR;
+		inputs_weight + witnesses_weight + transaction_weight + output_weight
+	}
+	/// Builds the claiming transaction: for malleable packages, assembles and signs a fresh
+	/// tx paying `value` to `destination_script`; for untractable ones, returns the cached
+	/// pre-signed tx of the single input. Returns None if any input can't be finalized.
+	pub(crate) fn finalize_package<L: Deref, Signer: Sign>(&self, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64, destination_script: Script, logger: &L) -> Option<Transaction>
+		where L::Target: Logger,
+	{
+		match self.malleability {
+			PackageMalleability::Malleable => {
+				let mut bumped_tx = Transaction {
+					version: 2,
+					lock_time: 0,
+					input: vec![],
+					output: vec![TxOut {
+						script_pubkey: destination_script,
+						value,
+					}],
+				};
+				for (outpoint, _) in self.inputs.iter() {
+					// nSequence 0xfffffffd opts in to BIP125 replaceability.
+					bumped_tx.input.push(TxIn {
+						previous_output: *outpoint,
+						script_sig: Script::new(),
+						sequence: 0xfffffffd,
+						witness: Vec::new(),
+					});
+				}
+				for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
+					log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+					if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
+				}
+				log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
+				return Some(bumped_tx);
+			},
+			PackageMalleability::Untractable => {
+				if let Some((outpoint, outp)) = self.inputs.first() {
+					if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
+						log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+						log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
+						return Some(final_tx);
+					}
+					return None;
+				} else { panic!("API Error: Package must not be inputs empty"); }
+			},
+		}
+	}
+	/// In LN, output claimed are time-sensitive, which means we have to spend them before reaching some timelock expiration. At in-channel
+	/// output detection, we generate a first version of a claim tx and associate to it a height timer. A height timer is an absolute block
+	/// height that once reached we should generate a new bumped "version" of the claim tx to be sure that we safely claim outputs before
+	/// that our counterparty can do so. If timelock expires soon, height timer is going to be scaled down in consequence to increase
+	/// frequency of the bump and so increase our bets of success.
+	pub(crate) fn get_height_timer(&self, current_height: u32) -> u32 {
+		if self.soonest_conf_deadline <= current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL {
+			return current_height + HIGH_FREQUENCY_BUMP_INTERVAL
+		} else if self.soonest_conf_deadline - current_height <= LOW_FREQUENCY_BUMP_INTERVAL {
+			return current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL
+		}
+		current_height + LOW_FREQUENCY_BUMP_INTERVAL
+	}
+	/// Returns value in satoshis to be included as package outgoing output amount and feerate with which package finalization should be done.
+	pub(crate) fn compute_package_output<F: Deref, L: Deref>(&self, predicted_weight: usize, input_amounts: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
+		where F::Target: FeeEstimator,
+		      L::Target: Logger,
+	{
+		// If old feerate is 0, first iteration of this claim, use normal fee calculation
+		if self.feerate_previous != 0 {
+			if let Some((new_fee, feerate)) = feerate_bump(predicted_weight, input_amounts, self.feerate_previous, fee_estimator, logger) {
+				// If new computed fee is superior at the whole claimable amount burn all in fees
+				if new_fee > input_amounts {
+					return Some((0, feerate));
+				} else {
+					return Some((input_amounts - new_fee, feerate));
+				}
+			}
+		} else {
+			if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
+				return Some((input_amounts - new_fee, feerate));
+			}
+		}
+		None
+	}
+	/// Creates a single-input package; malleability and aggregability are implied by the
+	/// solving-data variant (see also the matching table in `Readable for PackageTemplate`).
+	pub (crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, aggregable: bool, height_original: u32) -> Self {
+		let malleability = match input_solving_data {
+			PackageSolvingData::RevokedOutput(..) => { PackageMalleability::Malleable },
+			PackageSolvingData::RevokedHTLCOutput(..) => { PackageMalleability::Malleable },
+			PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { PackageMalleability::Malleable },
+			PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { PackageMalleability::Malleable },
+			PackageSolvingData::HolderHTLCOutput(..) => { PackageMalleability::Untractable },
+			PackageSolvingData::HolderFundingOutput(..) => { PackageMalleability::Untractable },
+		};
+		let mut inputs = Vec::with_capacity(1);
+		inputs.push((BitcoinOutPoint { txid, vout }, input_solving_data));
+		PackageTemplate {
+			inputs,
+			malleability,
+			soonest_conf_deadline,
+			aggregable,
+			feerate_previous: 0,
+			height_timer: None,
+			height_original,
+		}
+	}
+}
+
+impl Writeable for PackageTemplate {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		// Note: `malleability` and `aggregable` are intentionally not serialized — they are
+		// re-derived from the first input's variant on read.
+		writer.write_all(&byte_utils::be64_to_array(self.inputs.len() as u64))?;
+		for (ref outpoint, ref rev_outp) in self.inputs.iter() {
+			outpoint.write(writer)?;
+			rev_outp.write(writer)?;
+		}
+		self.soonest_conf_deadline.write(writer)?;
+		self.feerate_previous.write(writer)?;
+		self.height_timer.write(writer)?;
+		self.height_original.write(writer)?;
+		Ok(())
+	}
+}
+
+impl Readable for PackageTemplate {
+	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		let inputs_count = <u64 as Readable>::read(reader)?;
+		// Cap the pre-allocation so a corrupt count can't trigger a huge allocation.
+		let mut inputs: Vec<(BitcoinOutPoint, PackageSolvingData)> = Vec::with_capacity(cmp::min(inputs_count as usize, MAX_ALLOC_SIZE / 128));
+		for _ in 0..inputs_count {
+			let outpoint = Readable::read(reader)?;
+			let rev_outp = Readable::read(reader)?;
+			inputs.push((outpoint, rev_outp));
+		}
+		// Re-derive malleability and aggregability from the first input's variant; an empty
+		// package is invalid on disk. Counterparty received-HTLC claims are timelocked and
+		// therefore non-aggregable (see the `aggregable` field docs on PackageTemplate).
+		let (malleability, aggregable) = if let Some((_, lead_input)) = inputs.first() {
+			match lead_input {
+				PackageSolvingData::RevokedOutput(..) => { (PackageMalleability::Malleable, true) },
+				PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+				PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+				PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
+				PackageSolvingData::HolderHTLCOutput(..) => { (PackageMalleability::Untractable, false) },
+				PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
+			}
+		} else { return Err(DecodeError::InvalidValue); };
+		let soonest_conf_deadline = Readable::read(reader)?;
+		let feerate_previous = Readable::read(reader)?;
+		let height_timer = Readable::read(reader)?;
+		let height_original = Readable::read(reader)?;
+		Ok(PackageTemplate {
+			inputs,
+			malleability,
+			soonest_conf_deadline,
+			aggregable,
+			feerate_previous,
+			height_timer,
+			height_original,
+		})
+	}
+}
+
+/// Attempt to propose a bumping fee for a transaction from its spent output's values and predicted
+/// weight. We start with the highest priority feerate returned by the node's fee estimator then
+/// fall-back to lower priorities until we have enough value available to suck from.
+///
+/// If the proposed fee is less than the available spent output's values, we return the proposed
+/// fee and the corresponding updated feerate. If the proposed fee is equal or more than the
+/// available spent output's values, we return nothing
+fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: usize, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
+	where F::Target: FeeEstimator,
+	      L::Target: Logger,
+{
+	// Feerates are in sat-per-1000-weight, hence the /1000 when converting to an
+	// absolute fee. Each fallback keeps a distinct log message for diagnosis.
+	let mut updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64;
+	let mut fee = updated_feerate * (predicted_weight as u64) / 1000;
+	if input_amounts <= fee {
+		updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal) as u64;
+		fee = updated_feerate * (predicted_weight as u64) / 1000;
+		if input_amounts <= fee {
+			updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) as u64;
+			fee = updated_feerate * (predicted_weight as u64) / 1000;
+			if input_amounts <= fee {
+				log_error!(logger, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
+					fee, input_amounts);
+				None
+			} else {
+				log_warn!(logger, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
+					input_amounts);
+				Some((fee, updated_feerate))
+			}
+		} else {
+			log_warn!(logger, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
+				input_amounts);
+			Some((fee, updated_feerate))
+		}
+	} else {
+		Some((fee, updated_feerate))
+	}
+}
+
+/// Attempt to propose a bumping fee for a transaction from its spent output's values and predicted
+/// weight. If feerates proposed by the fee-estimator have been increasing since last fee-bumping
+/// attempt, use them. Otherwise, blindly bump the feerate by 25% of the previous feerate. We also
+/// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust
+/// the new fee to meet the RBF policy requirement.
+///
+/// Returns `Some((new_fee, new_feerate))` in satoshis and sat-per-1000-weight respectively,
+/// or `None` if the claimable amount can't even cover the bumped fee.
+fn feerate_bump<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64, previous_feerate: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
+	where F::Target: FeeEstimator,
+	      L::Target: Logger,
+{
+	// If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
+	let new_fee = if let Some((new_fee, _)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
+		// Feerates are sat-per-1000-weight, so feerate = fee * 1000 / weight. The previous
+		// `new_fee / (predicted_weight * 1000)` had the division inverted, truncating the
+		// estimated feerate to ~0 and unconditionally forcing the blind-bump path below.
+		let updated_feerate = new_fee * 1000 / (predicted_weight as u64);
+		if updated_feerate > previous_feerate {
+			new_fee
+		} else {
+			// ...else just increase the previous feerate by 25% (because that's a nice number)
+			let new_fee = previous_feerate * (predicted_weight as u64) / 750;
+			if input_amounts <= new_fee {
+				log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
+				return None;
+			}
+			new_fee
+		}
+	} else {
+		log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
+		return None;
+	};
+
+	let previous_fee = previous_feerate * (predicted_weight as u64) / 1000;
+	let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * (predicted_weight as u64) / 1000;
+	// BIP 125 Opt-in Full Replace-by-Fee Signaling
+	// * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
+	// * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
+	let new_fee = if new_fee < previous_fee + min_relay_fee {
+		// Simplified from `new_fee + previous_fee + min_relay_fee - new_fee`.
+		previous_fee + min_relay_fee
+	} else {
+		new_fee
+	};
+	Some((new_fee, new_fee * 1000 / (predicted_weight as u64)))
+}
+
+#[cfg(test)]
+mod tests {
+ use chain::package::{CounterpartyReceivedHTLCOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedOutput, WEIGHT_REVOKED_OUTPUT};
+ use chain::Txid;
+ use ln::chan_utils::HTLCOutputInCommitment;
+ use ln::{PaymentPreimage, PaymentHash};
+
+ use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
+ use bitcoin::blockdata::script::Script;
+ use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+
+ use bitcoin::hashes::hex::FromHex;
+
+ use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+ use bitcoin::secp256k1::Secp256k1;
+
+	// Builds a placeholder revoked-balance claim (amount 0, CSV 0) from a fixed scalar.
+	macro_rules! dumb_revk_output {
+		($secp_ctx: expr) => {
+			{
+				let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+				let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar);
+				PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, dumb_point, dumb_point, dumb_scalar, 0, 0))
+			}
+		}
+	}
+
+	// Builds a placeholder counterparty received-HTLC claim carrying `$amt` msat.
+	macro_rules! dumb_counterparty_output {
+		($secp_ctx: expr, $amt: expr) => {
+			{
+				let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+				let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar);
+				let hash = PaymentHash([1; 32]);
+				let htlc = HTLCOutputInCommitment { offered: true, amount_msat: $amt, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None };
+				PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, dumb_point, dumb_point, htlc))
+			}
+		}
+	}
+
+	// Builds a placeholder holder-HTLC claim (untractable package variant).
+	macro_rules! dumb_htlc_output {
+		() => {
+			{
+				let preimage = PaymentPreimage([2;32]);
+				PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(Some(preimage), 0))
+			}
+		}
+	}
+
+	// merge_package must refuse packages originating at different block heights.
+	#[test]
+	#[should_panic]
+	fn test_package_differing_heights() {
+		let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+		let secp_ctx = Secp256k1::new();
+		let revk_outp = dumb_revk_output!(secp_ctx);
+
+		let mut package_one_hundred = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
+		let package_two_hundred = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 200);
+		package_one_hundred.merge_package(package_two_hundred);
+	}
+
+	// merge_package must panic when the merged-from package is untractable (holder HTLC).
+	#[test]
+	#[should_panic]
+	fn test_package_untractable_merge_to() {
+		let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+		let secp_ctx = Secp256k1::new();
+		let revk_outp = dumb_revk_output!(secp_ctx);
+		let htlc_outp = dumb_htlc_output!();
+
+		let mut untractable_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
+		let malleable_package = PackageTemplate::build_package(txid, 1, htlc_outp.clone(), 1000, true, 100);
+		untractable_package.merge_package(malleable_package);
+	}
+
+	// merge_package must panic when the merged-into package is untractable.
+	#[test]
+	#[should_panic]
+	fn test_package_untractable_merge_from() {
+		let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+		let secp_ctx = Secp256k1::new();
+		let htlc_outp = dumb_htlc_output!();
+		let revk_outp = dumb_revk_output!(secp_ctx);
+
+		let mut malleable_package = PackageTemplate::build_package(txid, 0, htlc_outp.clone(), 1000, true, 100);
+		let untractable_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+		malleable_package.merge_package(untractable_package);
+	}
+
+ #[test]
+ #[should_panic]
+ fn test_package_noaggregation_to() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp = dumb_revk_output!(secp_ctx);
+
+ let mut noaggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, false, 100);
+ let aggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+ noaggregation_package.merge_package(aggregation_package);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_package_noaggregation_from() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp = dumb_revk_output!(secp_ctx);
+
+ let mut aggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
+ let noaggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, false, 100);
+ aggregation_package.merge_package(noaggregation_package);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_package_empty() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp = dumb_revk_output!(secp_ctx);
+
+ let mut empty_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
+ empty_package.inputs = vec![];
+ let package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+ empty_package.merge_package(package);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_package_differing_categories() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp = dumb_revk_output!(secp_ctx);
+ let counterparty_outp = dumb_counterparty_output!(secp_ctx, 0);
+
+ let mut revoked_package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
+ let counterparty_package = PackageTemplate::build_package(txid, 1, counterparty_outp, 1000, true, 100);
+ revoked_package.merge_package(counterparty_package);
+ }
+
+ #[test]
+ fn test_package_split_malleable() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp_one = dumb_revk_output!(secp_ctx);
+ let revk_outp_two = dumb_revk_output!(secp_ctx);
+ let revk_outp_three = dumb_revk_output!(secp_ctx);
+
+ let mut package_one = PackageTemplate::build_package(txid, 0, revk_outp_one, 1000, true, 100);
+ let package_two = PackageTemplate::build_package(txid, 1, revk_outp_two, 1000, true, 100);
+ let package_three = PackageTemplate::build_package(txid, 2, revk_outp_three, 1000, true, 100);
+
+ package_one.merge_package(package_two);
+ package_one.merge_package(package_three);
+ assert_eq!(package_one.outpoints().len(), 3);
+
+ if let Some(split_package) = package_one.split_package(&BitcoinOutPoint { txid, vout: 1 }) {
+ // Packages attributes should be identical
+ assert!(split_package.is_malleable());
+ assert_eq!(split_package.soonest_conf_deadline, package_one.soonest_conf_deadline);
+ assert_eq!(split_package.aggregable, package_one.aggregable);
+ assert_eq!(split_package.feerate_previous, package_one.feerate_previous);
+ assert_eq!(split_package.height_timer, package_one.height_timer);
+ assert_eq!(split_package.height_original, package_one.height_original);
+ } else { panic!(); }
+ assert_eq!(package_one.outpoints().len(), 2);
+ }
+
+ #[test]
+ fn test_package_split_untractable() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let htlc_outp_one = dumb_htlc_output!();
+
+ let mut package_one = PackageTemplate::build_package(txid, 0, htlc_outp_one, 1000, true, 100);
+ let ret_split = package_one.split_package(&BitcoinOutPoint { txid, vout: 0});
+ assert!(ret_split.is_none());
+ }
+
+ #[test]
+ fn test_package_timer() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp = dumb_revk_output!(secp_ctx);
+
+ let mut package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
+ let timer_none = package.timer();
+ assert!(timer_none.is_none());
+ package.set_timer(Some(100));
+ if let Some(timer_some) = package.timer() {
+ assert_eq!(timer_some, 100);
+ } else { panic!() }
+ }
+
+ #[test]
+ fn test_package_amounts() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000);
+
+ let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+ assert_eq!(package.package_amount(), 1000);
+ }
+
+ #[test]
+ fn test_package_weight() {
+ let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
+ let secp_ctx = Secp256k1::new();
+ let revk_outp = dumb_revk_output!(secp_ctx);
+
+ let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, true, 100);
+ // (nVersion (4) + nLocktime (4) + count_tx_in (1) + prevout (36) + sequence (4) + script_length (1) + count_tx_out (1) + value (8) + var_int (1)) * WITNESS_SCALE_FACTOR
+ // + witness marker (2) + WEIGHT_REVOKED_OUTPUT
+ assert_eq!(package.package_weight(&Script::new()), (4 + 4 + 1 + 36 + 4 + 1 + 1 + 8 + 1) * WITNESS_SCALE_FACTOR + 2 + WEIGHT_REVOKED_OUTPUT as usize);
+ }
+}
use chain;
// Maximum size of a serialized HTLCOutputInCommitment
-const HTLC_OUTPUT_IN_COMMITMENT_SIZE: usize = 1 + 8 + 4 + 32 + 5;
+pub(crate) const HTLC_OUTPUT_IN_COMMITMENT_SIZE: usize = 1 + 8 + 4 + 32 + 5;
pub(crate) const MAX_HTLCS: u16 = 483;
pub mod peer_handler;
pub mod chan_utils;
pub mod features;
-pub(crate) mod onchaintx;
#[cfg(feature = "fuzztarget")]
pub mod peer_channel_encryptor;
+++ /dev/null
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-//! The logic to build claims and bump in-flight transactions until confirmations.
-//!
-//! OnchainTxHandler objects are fully-part of ChannelMonitor and encapsulates all
-//! building, tracking, bumping and notifications functions.
-
-use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
-use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
-use bitcoin::blockdata::script::Script;
-
-use bitcoin::hash_types::Txid;
-
-use bitcoin::secp256k1::{Secp256k1, Signature};
-use bitcoin::secp256k1;
-
-use ln::msgs::DecodeError;
-use ln::PaymentPreimage;
-use ln::chan_utils;
-use ln::chan_utils::{TxCreationKeys, ChannelTransactionParameters, HolderCommitmentTransaction};
-use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
-use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
-use chain::keysinterface::{Sign, KeysInterface};
-use util::logger::Logger;
-use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
-use util::byte_utils;
-
-use std::collections::HashMap;
-use core::cmp;
-use core::ops::Deref;
-use core::mem::replace;
-
-const MAX_ALLOC_SIZE: usize = 64*1024;
-
/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
/// transaction causing it.
///
/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
#[derive(PartialEq)]
struct OnchainEventEntry {
	// Transaction whose confirmation triggered this event.
	txid: Txid,
	// Block height at which `txid` was observed confirmed.
	height: u32,
	// The pending action; see `OnchainEvent`.
	event: OnchainEvent,
}

impl OnchainEventEntry {
	/// Height at which the event reaches ANTI_REORG_DELAY confirmations and
	/// becomes reorg-safe.
	fn confirmation_threshold(&self) -> u32 {
		self.height + ANTI_REORG_DELAY - 1
	}

	/// True once the given chain tip height has reached the reorg-safe threshold.
	fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
		height >= self.confirmation_threshold()
	}
}
-
/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(PartialEq)]
enum OnchainEvent {
	/// Outpoint under claim process by our own tx; once this one gets enough confirmations, we remove it from
	/// the bump-txn candidate buffer.
	Claim {
		claim_request: Txid,
	},
	/// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by a counterparty tx.
	/// In this case, we need to drop the outpoint and regenerate a new claim tx. For safety, we keep tracking
	/// the outpoint to be sure to resurrect it back to the claim tx if reorgs happen.
	ContentiousOutpoint {
		outpoint: BitcoinOutPoint,
		input_material: InputMaterial,
	}
}
-
/// Higher-level cache structure needed to re-generate bumped claim txn if needed
#[derive(Clone, PartialEq)]
pub struct ClaimTxBumpMaterial {
	// At every block tick, used to check if pending claiming tx is taking too
	// much time for confirmation and we need to bump it.
	height_timer: Option<u32>,
	// Tracked in case of reorg to wipe out now-superfluous bump material
	feerate_previous: u32,
	// Soonest timelock among the set of outpoints claimed, used to compute
	// a claim priority (this is a block height, not a feerate).
	soonest_timelock: u32,
	// Cache of script, pubkey, sig or key to solve claimable outputs scriptpubkey.
	per_input_material: HashMap<BitcoinOutPoint, InputMaterial>,
}

impl Writeable for ClaimTxBumpMaterial {
	/// Serializes as: height_timer, big-endian feerate and timelock, then a
	/// u64-length-prefixed list of (outpoint, material) pairs. Must stay in sync
	/// with the `Readable` impl below.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.height_timer.write(writer)?;
		writer.write_all(&byte_utils::be32_to_array(self.feerate_previous))?;
		writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?;
		writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?;
		for (outp, tx_material) in self.per_input_material.iter() {
			outp.write(writer)?;
			tx_material.write(writer)?;
		}
		Ok(())
	}
}

impl Readable for ClaimTxBumpMaterial {
	/// Mirror of `Writeable::write` above. The map's capacity hint is clamped by
	/// MAX_ALLOC_SIZE so a corrupted length prefix cannot trigger a huge allocation.
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let height_timer = Readable::read(reader)?;
		let feerate_previous = Readable::read(reader)?;
		let soonest_timelock = Readable::read(reader)?;
		let per_input_material_len: u64 = Readable::read(reader)?;
		let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0 ..per_input_material_len {
			let outpoint = Readable::read(reader)?;
			let input_material = Readable::read(reader)?;
			per_input_material.insert(outpoint, input_material);
		}
		Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material })
	}
}
-
/// Class of claimable input, used to predict the witness weight of the claiming
/// transaction (see `OnchainTxHandler::get_witnesses_weight`).
#[derive(PartialEq, Clone, Copy)]
pub(crate) enum InputDescriptors {
	RevokedOfferedHTLC,
	RevokedReceivedHTLC,
	OfferedHTLC,
	ReceivedHTLC,
	RevokedOutput, // either a revoked to_holder output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
}

impl Writeable for InputDescriptors {
	/// Serializes the variant as a single byte tag (0-4); must stay in sync with
	/// the `Readable` impl below.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		match self {
			&InputDescriptors::RevokedOfferedHTLC => {
				writer.write_all(&[0; 1])?;
			},
			&InputDescriptors::RevokedReceivedHTLC => {
				writer.write_all(&[1; 1])?;
			},
			&InputDescriptors::OfferedHTLC => {
				writer.write_all(&[2; 1])?;
			},
			&InputDescriptors::ReceivedHTLC => {
				writer.write_all(&[3; 1])?;
			}
			&InputDescriptors::RevokedOutput => {
				writer.write_all(&[4; 1])?;
			}
		}
		Ok(())
	}
}

impl Readable for InputDescriptors {
	/// Reads the single-byte tag written above; any other value is rejected as
	/// InvalidValue.
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let input_descriptor = match <u8 as Readable>::read(reader)? {
			0 => {
				InputDescriptors::RevokedOfferedHTLC
			},
			1 => {
				InputDescriptors::RevokedReceivedHTLC
			},
			2 => {
				InputDescriptors::OfferedHTLC
			},
			3 => {
				InputDescriptors::ReceivedHTLC
			},
			4 => {
				InputDescriptors::RevokedOutput
			}
			_ => return Err(DecodeError::InvalidValue),
		};
		Ok(input_descriptor)
	}
}
-
// Subtracts an estimated fee from `$value` in place, trying progressively cheaper
// confirmation targets (HighPriority -> Normal -> Background) until the fee fits
// within the claimable value. Evaluates to `true` on success (with `$value`
// reduced by the fee and `$used_feerate` set to the rate used) and `false` if
// even the Background fee exceeds the whole claim balance.
macro_rules! subtract_high_prio_fee {
	($logger: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
		{
			$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority).into();
			let mut fee = $used_feerate as u64 * $predicted_weight / 1000;
			if $value <= fee {
				$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal).into();
				fee = $used_feerate as u64 * $predicted_weight / 1000;
				if $value <= fee.into() {
					$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background).into();
					fee = $used_feerate as u64 * $predicted_weight / 1000;
					if $value <= fee {
						log_error!($logger, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
							fee, $value);
						false
					} else {
						log_warn!($logger, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
							$value);
						$value -= fee;
						true
					}
				} else {
					log_warn!($logger, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
						$value);
					$value -= fee;
					true
				}
			} else {
				// High-priority fee fits: use it without logging.
				$value -= fee;
				true
			}
		}
	}
}
-
// (De)serialization for the per-commitment HTLC signature cache
// (`holder_htlc_sigs` / `prev_holder_htlc_sigs`). Encoding: a 0/1 byte for the
// outer Option, then a u64 element count, then per element a 0/1 byte for the
// inner Option followed by (index as u64, signature).
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		match Readable::read(reader)? {
			0u8 => Ok(None),
			1u8 => {
				let vlen: u64 = Readable::read(reader)?;
				// Clamp the capacity hint so a corrupted length can't cause a huge allocation.
				let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
				for _ in 0..vlen {
					ret.push(match Readable::read(reader)? {
						0u8 => None,
						1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
						_ => return Err(DecodeError::InvalidValue)
					});
				}
				Ok(Some(ret))
			},
			_ => Err(DecodeError::InvalidValue),
		}
	}
}

impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		match self {
			&Some(ref vec) => {
				1u8.write(writer)?;
				(vec.len() as u64).write(writer)?;
				for opt in vec.iter() {
					match opt {
						&Some((ref idx, ref sig)) => {
							1u8.write(writer)?;
							(*idx as u64).write(writer)?;
							sig.write(writer)?;
						},
						&None => 0u8.write(writer)?,
					}
				}
			},
			&None => 0u8.write(writer)?,
		}
		Ok(())
	}
}
-
-
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
/// do RBF bumping if possible.
pub struct OnchainTxHandler<ChannelSigner: Sign> {
	// Script all claimed value is paid out to.
	destination_script: Script,
	// Latest (and previous, below) holder commitment transactions we may need to broadcast.
	holder_commitment: HolderCommitmentTransaction,
	// holder_htlc_sigs and prev_holder_htlc_sigs are in the order as they appear in the commitment
	// transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
	// the set of HTLCs in the HolderCommitmentTransaction.
	holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
	prev_holder_commitment: Option<HolderCommitmentTransaction>,
	prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,

	// Signer used to produce justice/claim signatures.
	signer: ChannelSigner,
	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,

	// Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
	// it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
	// another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in the flight for the
	// same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
	// block connection we scan all inputs and if any of them is among a set of a claiming request we test for set
	// equality between the spending transaction and the claim request. If true, it means the transaction was one of
	// our claiming ones; after a security delay of 6 blocks we remove the pending claim request. If false, it means
	// the transaction wasn't, and we need to regenerate a new claim request with the reduced set of still-claimable
	// outpoints.
	// Key is the identifier of the pending claim request, i.e the txid of the initial claiming transaction generated
	// by us and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
	// Entry is a cache of elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub pending_claim_requests: HashMap<Txid, ClaimTxBumpMaterial>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Txid, ClaimTxBumpMaterial>,

	// Used to link outpoints claimed in a connected block to a pending claim request.
	// Key is an outpoint that monitor parsing has detected we have keys/scripts to claim.
	// Value is (pending claim request identifier, confirmation_block); the identifier
	// is the txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved; confirmation_block is used to erase the entry if the
	// block with the output gets disconnected.
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,

	// Events (claims/contentious outpoints) waiting for ANTI_REORG_DELAY confirmations.
	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,

	// Best block height seen so far.
	latest_height: u32,

	secp_ctx: Secp256k1<secp256k1::All>,
}
-
// Format version written by `OnchainTxHandler::write` via write_ver_prefix!.
const SERIALIZATION_VERSION: u8 = 1;
// Minimum version a reader must understand; included in the serialization prefix.
const MIN_SERIALIZATION_VERSION: u8 = 1;
-
impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	/// Serializes the full handler state: commitment data, signer key material
	/// (u32-length-prefixed), both claim-tracking maps (u64-length-prefixed), the
	/// pending onchain events (tagged 0=Claim, 1=ContentiousOutpoint) and the
	/// latest height, followed by an (empty) TLV suffix.
	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.destination_script.write(writer)?;
		self.holder_commitment.write(writer)?;
		self.holder_htlc_sigs.write(writer)?;
		self.prev_holder_commitment.write(writer)?;
		self.prev_holder_htlc_sigs.write(writer)?;

		self.channel_transaction_parameters.write(writer)?;

		// Serialize the signer into a temporary buffer first so we can length-prefix it.
		let mut key_data = VecWriter(Vec::new());
		self.signer.write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
		for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
			ancestor_claim_txid.write(writer)?;
			claim_tx_data.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
			outp.write(writer)?;
			claim_and_height.0.write(writer)?;
			claim_and_height.1.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
			entry.txid.write(writer)?;
			writer.write_all(&byte_utils::be32_to_array(entry.height))?;
			match entry.event {
				OnchainEvent::Claim { ref claim_request } => {
					writer.write_all(&[0; 1])?;
					claim_request.write(writer)?;
				},
				OnchainEvent::ContentiousOutpoint { ref outpoint, ref input_material } => {
					writer.write_all(&[1; 1])?;
					outpoint.write(writer)?;
					input_material.write(writer)?;
				}
			}
		}
		self.latest_height.write(writer)?;

		write_tlv_fields!(writer, {}, {});
		Ok(())
	}
}
-
-impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
- fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
- let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
-
- let destination_script = Readable::read(reader)?;
-
- let holder_commitment = Readable::read(reader)?;
- let holder_htlc_sigs = Readable::read(reader)?;
- let prev_holder_commitment = Readable::read(reader)?;
- let prev_holder_htlc_sigs = Readable::read(reader)?;
-
- let channel_parameters = Readable::read(reader)?;
-
- let keys_len: u32 = Readable::read(reader)?;
- let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
- while keys_data.len() != keys_len as usize {
- // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
- let mut data = [0; 1024];
- let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
- reader.read_exact(read_slice)?;
- keys_data.extend_from_slice(read_slice);
- }
- let signer = keys_manager.read_chan_signer(&keys_data)?;
-
- let pending_claim_requests_len: u64 = Readable::read(reader)?;
- let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..pending_claim_requests_len {
- pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
- }
-
- let claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..claimable_outpoints_len {
- let outpoint = Readable::read(reader)?;
- let ancestor_claim_txid = Readable::read(reader)?;
- let height = Readable::read(reader)?;
- claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
- }
- let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
- let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..waiting_threshold_conf_len {
- let txid = Readable::read(reader)?;
- let height = Readable::read(reader)?;
- let event = match <u8 as Readable>::read(reader)? {
- 0 => {
- let claim_request = Readable::read(reader)?;
- OnchainEvent::Claim {
- claim_request
- }
- },
- 1 => {
- let outpoint = Readable::read(reader)?;
- let input_material = Readable::read(reader)?;
- OnchainEvent::ContentiousOutpoint {
- outpoint,
- input_material
- }
- }
- _ => return Err(DecodeError::InvalidValue),
- };
- onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { txid, height, event });
- }
- let latest_height = Readable::read(reader)?;
-
- read_tlv_fields!(reader, {}, {});
-
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
-
- Ok(OnchainTxHandler {
- destination_script,
- holder_commitment,
- holder_htlc_sigs,
- prev_holder_commitment,
- prev_holder_htlc_sigs,
- signer,
- channel_transaction_parameters: channel_parameters,
- claimable_outpoints,
- pending_claim_requests,
- onchain_events_awaiting_threshold_conf,
- latest_height,
- secp_ctx,
- })
- }
-}
-
-impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	/// Constructs a fresh handler with no pending claim requests, no tracked
	/// outpoints and no queued onchain events.
	pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
		OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs: None,
			prev_holder_commitment: None,
			prev_holder_htlc_sigs: None,
			signer,
			channel_transaction_parameters: channel_parameters,
			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),
			onchain_events_awaiting_threshold_conf: Vec::new(),
			latest_height: 0,

			secp_ctx,
		}
	}
-
	/// Returns the predicted total witness weight for a claim tx spending the
	/// given inputs, including the 2 weight units for the segwit marker/flag.
	pub(crate) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
		let mut tx_weight = 2; // count segwit flags
		for inp in inputs {
			// We use expected weight (and not actual) as signatures and time lock delays may vary
			tx_weight += match inp {
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedOfferedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 133
				},
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedReceivedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 139
				},
				// number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script
				&InputDescriptors::OfferedHTLC => {
					1 + 1 + 73 + 1 + 32 + 1 + 133
				},
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::ReceivedHTLC => {
					1 + 1 + 73 + 1 + 1 + 1 + 139
				},
				// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
				&InputDescriptors::RevokedOutput => {
					1 + 1 + 73 + 1 + 1 + 1 + 77
				},
			};
		}
		tx_weight
	}
-
- /// In LN, output claimed are time-sensitive, which means we have to spend them before reaching some timelock expiration. At in-channel
- /// output detection, we generate a first version of a claim tx and associate to it a height timer. A height timer is an absolute block
- /// height than once reached we should generate a new bumped "version" of the claim tx to be sure than we safely claim outputs before
- /// than our counterparty can do it too. If timelock expires soon, height timer is going to be scale down in consequence to increase
- /// frequency of the bump and so increase our bets of success.
- fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
- if timelock_expiration <= current_height + 3 {
- return current_height + 1
- } else if timelock_expiration - current_height <= 15 {
- return current_height + 3
- }
- current_height + 15
- }
-
	/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies on the assumption of claim transactions getting confirmed before timelock expiration
	/// (CSV or CLTV following cases). In case of high-fee spikes, a claim tx may get stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pays-For-Parent.
- /// Panics if there are signing errors, because signing operations in reaction to on-chain events
- /// are not expected to fail, and if they do, we may lose funds.
- fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u32, Transaction)>
- where F::Target: FeeEstimator,
- L::Target: Logger,
- {
- if cached_claim_datas.per_input_material.len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
- let mut inputs = Vec::new();
- for outp in cached_claim_datas.per_input_material.keys() {
- log_trace!(logger, "Outpoint {}:{}", outp.txid, outp.vout);
- inputs.push(TxIn {
- previous_output: *outp,
- script_sig: Script::new(),
- sequence: 0xfffffffd,
- witness: Vec::new(),
- });
- }
- let mut bumped_tx = Transaction {
- version: 2,
- lock_time: 0,
- input: inputs,
- output: vec![TxOut {
- script_pubkey: self.destination_script.clone(),
- value: 0
- }],
- };
-
- macro_rules! RBF_bump {
- ($amount: expr, $old_feerate: expr, $fee_estimator: expr, $predicted_weight: expr) => {
- {
- let mut used_feerate: u32;
- // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
- let new_fee = if $old_feerate < $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) {
- let mut value = $amount;
- if subtract_high_prio_fee!(logger, $fee_estimator, value, $predicted_weight, used_feerate) {
- // Overflow check is done in subtract_high_prio_fee
- ($amount - value)
- } else {
- log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", $amount);
- return None;
- }
- // ...else just increase the previous feerate by 25% (because that's a nice number)
- } else {
- let fee = $old_feerate as u64 * ($predicted_weight as u64) / 750;
- if $amount <= fee {
- log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", $amount);
- return None;
- }
- fee
- };
-
- let previous_fee = $old_feerate as u64 * ($predicted_weight as u64) / 1000;
- let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * ($predicted_weight as u64) / 1000;
- // BIP 125 Opt-in Full Replace-by-Fee Signaling
- // * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
- // * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
- let new_fee = if new_fee < previous_fee + min_relay_fee {
- new_fee + previous_fee + min_relay_fee - new_fee
- } else {
- new_fee
- };
- Some((new_fee, new_fee * 1000 / ($predicted_weight as u64)))
- }
- }
- }
-
- // Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
- // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
- let new_timer = Some(Self::get_height_timer(height, cached_claim_datas.soonest_timelock));
- let mut inputs_witnesses_weight = 0;
- let mut amt = 0;
- let mut dynamic_fee = true;
- for per_outp_material in cached_claim_datas.per_input_material.values() {
- match per_outp_material {
- &InputMaterial::Revoked { ref input_descriptor, ref amount, .. } => {
- inputs_witnesses_weight += Self::get_witnesses_weight(&[*input_descriptor]);
- amt += *amount;
- },
- &InputMaterial::CounterpartyHTLC { ref preimage, ref htlc, .. } => {
- inputs_witnesses_weight += Self::get_witnesses_weight(if preimage.is_some() { &[InputDescriptors::OfferedHTLC] } else { &[InputDescriptors::ReceivedHTLC] });
- amt += htlc.amount_msat / 1000;
- },
- &InputMaterial::HolderHTLC { .. } => {
- dynamic_fee = false;
- },
- &InputMaterial::Funding { .. } => {
- dynamic_fee = false;
- }
- }
- }
- if dynamic_fee {
- let predicted_weight = (bumped_tx.get_weight() + inputs_witnesses_weight) as u64;
- let mut new_feerate;
- // If old feerate is 0, first iteration of this claim, use normal fee calculation
- if cached_claim_datas.feerate_previous != 0 {
- if let Some((new_fee, feerate)) = RBF_bump!(amt, cached_claim_datas.feerate_previous, fee_estimator, predicted_weight) {
- // If new computed fee is superior at the whole claimable amount burn all in fees
- if new_fee as u64 > amt {
- bumped_tx.output[0].value = 0;
- } else {
- bumped_tx.output[0].value = amt - new_fee as u64;
- }
- new_feerate = feerate;
- } else { return None; }
- } else {
- if subtract_high_prio_fee!(logger, fee_estimator, amt, predicted_weight, new_feerate) {
- bumped_tx.output[0].value = amt;
- } else { return None; }
- }
- assert!(new_feerate != 0);
-
- for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
- match per_outp_material {
- &InputMaterial::Revoked { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_counterparty_tx_csv } => {
- if let Ok(tx_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, &self.signer.pubkeys().revocation_basepoint, &self.signer.pubkeys().htlc_basepoint) {
-
- let witness_script = if let Some(ref htlc) = *htlc {
- chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &tx_keys.broadcaster_htlc_key, &tx_keys.countersignatory_htlc_key, &tx_keys.revocation_key)
- } else {
- chan_utils::get_revokeable_redeemscript(&tx_keys.revocation_key, *on_counterparty_tx_csv, &tx_keys.broadcaster_delayed_payment_key)
- };
-
- let sig = if let Some(ref htlc) = *htlc {
- self.signer.sign_justice_revoked_htlc(&bumped_tx, i, *amount, &per_commitment_key, &htlc, &self.secp_ctx).expect("sign justice tx")
- } else {
- self.signer.sign_justice_revoked_output(&bumped_tx, i, *amount, &per_commitment_key, &self.secp_ctx).expect("sign justice tx")
- };
- bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
- bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
- if htlc.is_some() {
- bumped_tx.input[i].witness.push(tx_keys.revocation_key.clone().serialize().to_vec());
- } else {
- bumped_tx.input[i].witness.push(vec!(1));
- }
- bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
-
- log_trace!(logger, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if *input_descriptor == InputDescriptors::RevokedOutput { "to_holder" } else if *input_descriptor == InputDescriptors::RevokedOfferedHTLC { "offered" } else if *input_descriptor == InputDescriptors::RevokedReceivedHTLC { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
- }
- },
- &InputMaterial::CounterpartyHTLC { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref preimage, ref htlc } => {
- if let Ok(tx_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, &self.signer.pubkeys().revocation_basepoint, &self.signer.pubkeys().htlc_basepoint) {
- let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &tx_keys.broadcaster_htlc_key, &tx_keys.countersignatory_htlc_key, &tx_keys.revocation_key);
-
- if !preimage.is_some() { bumped_tx.lock_time = htlc.cltv_expiry }; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
- let sig = self.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &htlc.amount_msat / 1000, &per_commitment_point, htlc, &self.secp_ctx).expect("sign counterparty HTLC tx");
- bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
- bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
- if let &Some(preimage) = preimage {
- bumped_tx.input[i].witness.push(preimage.0.to_vec());
- } else {
- // Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
- bumped_tx.input[i].witness.push(vec![]);
- }
- bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
- log_trace!(logger, "Going to broadcast Claim Transaction {} claiming counterparty {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
- }
- },
- _ => unreachable!()
- }
- }
- log_trace!(logger, "...with timer {}", new_timer.unwrap());
- assert!(predicted_weight >= bumped_tx.get_weight() as u64);
- return Some((new_timer, new_feerate as u32, bumped_tx))
- } else {
- for (_, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
- match per_outp_material {
- &InputMaterial::HolderHTLC { ref preimage, ref amount } => {
- let htlc_tx = self.get_fully_signed_htlc_tx(outp, preimage);
- if let Some(htlc_tx) = htlc_tx {
- let feerate = (amount - htlc_tx.output[0].value) * 1000 / htlc_tx.get_weight() as u64;
- // Timer set to $NEVER given we can't bump tx without anchor outputs
- log_trace!(logger, "Going to broadcast Holder HTLC-{} claiming HTLC output {} from {}...", if preimage.is_some() { "Success" } else { "Timeout" }, outp.vout, outp.txid);
- return Some((None, feerate as u32, htlc_tx));
- }
- return None;
- },
- &InputMaterial::Funding { ref funding_redeemscript } => {
- let signed_tx = self.get_fully_signed_holder_tx(funding_redeemscript);
- // Timer set to $NEVER given we can't bump tx without anchor outputs
- log_trace!(logger, "Going to broadcast Holder Transaction {} claiming funding output {} from {}...", signed_tx.txid(), outp.vout, outp.txid);
- return Some((None, self.holder_commitment.feerate_per_kw(), signed_tx));
- }
- _ => unreachable!()
- }
- }
- }
- None
- }
-
- /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
- /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
- /// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
- /// if we receive a preimage after force-close.
- pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, latest_height: Option<u32>, broadcaster: &B, fee_estimator: &F, logger: &L)
- where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let height = match latest_height {
- Some(h) => h,
- None => self.latest_height,
- };
- log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), claimable_outpoints.len());
- let mut new_claims = Vec::new();
- let mut aggregated_claim = HashMap::new();
- let mut aggregated_soonest = ::core::u32::MAX;
-
- // Try to aggregate outputs if their timelock expiration isn't imminent (absolute_timelock
- // <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
- for req in claimable_outpoints {
- // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
- if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
- log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
- if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
- let mut single_input = HashMap::new();
- single_input.insert(req.outpoint, req.witness_data);
- new_claims.push((req.absolute_timelock, single_input));
- } else {
- aggregated_claim.insert(req.outpoint, req.witness_data);
- if req.absolute_timelock < aggregated_soonest {
- aggregated_soonest = req.absolute_timelock;
- }
- }
- }
- }
- new_claims.push((aggregated_soonest, aggregated_claim));
-
- // Generate claim transactions and track them to bump if necessary at
- // height timer expiration (i.e in how many blocks we're going to take action).
- for (soonest_timelock, claim) in new_claims.drain(..) {
- let mut claim_material = ClaimTxBumpMaterial { height_timer: None, feerate_previous: 0, soonest_timelock, per_input_material: claim };
- if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator, &*logger) {
- claim_material.height_timer = new_timer;
- claim_material.feerate_previous = new_feerate;
- let txid = tx.txid();
- for k in claim_material.per_input_material.keys() {
- log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
- self.claimable_outpoints.insert(k.clone(), (txid, height));
- }
- self.pending_claim_requests.insert(txid, claim_material);
- log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
- broadcaster.broadcast_transaction(&tx);
- }
- }
-
- let mut bump_candidates = HashMap::new();
- for tx in txn_matched {
- // Scan all input to verify is one of the outpoint spent is of interest for us
- let mut claimed_outputs_material = Vec::new();
- for inp in &tx.input {
- if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
- // If outpoint has claim request pending on it...
- if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
- //... we need to verify equality between transaction outpoints and claim request
- // outpoints to know if transaction is the original claim or a bumped one issued
- // by us.
- let mut set_equality = true;
- if claim_material.per_input_material.len() != tx.input.len() {
- set_equality = false;
- } else {
- for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
- if *claim_inp != tx_inp.previous_output {
- set_equality = false;
- }
- }
- }
-
- macro_rules! clean_claim_request_after_safety_delay {
- () => {
- let entry = OnchainEventEntry {
- txid: tx.txid(),
- height,
- event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
- };
- if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- }
-
- // If this is our transaction (or our counterparty spent all the outputs
- // before we could anyway with same inputs order than us), wait for
- // ANTI_REORG_DELAY and clean the RBF tracking map.
- if set_equality {
- clean_claim_request_after_safety_delay!();
- } else { // If false, generate new claim request with update outpoint set
- let mut at_least_one_drop = false;
- for input in tx.input.iter() {
- if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
- claimed_outputs_material.push((input.previous_output, input_material));
- at_least_one_drop = true;
- }
- // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
- if claim_material.per_input_material.is_empty() {
- clean_claim_request_after_safety_delay!();
- }
- }
- //TODO: recompute soonest_timelock to avoid wasting a bit on fees
- if at_least_one_drop {
- bump_candidates.insert(first_claim_txid_height.0.clone(), claim_material.clone());
- }
- }
- break; //No need to iterate further, either tx is our or their
- } else {
- panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
- }
- }
- }
- for (outpoint, input_material) in claimed_outputs_material.drain(..) {
- let entry = OnchainEventEntry {
- txid: tx.txid(),
- height,
- event: OnchainEvent::ContentiousOutpoint { outpoint, input_material },
- };
- if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- }
-
- // After security delay, either our claim tx got enough confs or outpoint is definetely out of reach
- let onchain_events_awaiting_threshold_conf =
- self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
- for entry in onchain_events_awaiting_threshold_conf {
- if entry.has_reached_confirmation_threshold(height) {
- match entry.event {
- OnchainEvent::Claim { claim_request } => {
- // We may remove a whole set of claim outpoints here, as these one may have
- // been aggregated in a single tx and claimed so atomically
- if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
- for outpoint in bump_material.per_input_material.keys() {
- self.claimable_outpoints.remove(&outpoint);
- }
- }
- },
- OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
- self.claimable_outpoints.remove(&outpoint);
- }
- }
- } else {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
-
- // Check if any pending claim request must be rescheduled
- for (first_claim_txid, ref claim_data) in self.pending_claim_requests.iter() {
- if let Some(height_timer) = claim_data.height_timer {
- if height >= height_timer {
- bump_candidates.insert(*first_claim_txid, (*claim_data).clone());
- }
- }
- }
-
- // Build, bump and rebroadcast tx accordingly
- log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
- for (first_claim_txid, claim_material) in bump_candidates.iter() {
- if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator, &*logger) {
- log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transaction(&bump_tx);
- if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
- claim_material.height_timer = new_timer;
- claim_material.feerate_previous = new_feerate;
- }
- }
- }
- }
-
- pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
- &mut self,
- txid: &Txid,
- broadcaster: B,
- fee_estimator: F,
- logger: L,
- ) where
- B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let mut height = None;
- for entry in self.onchain_events_awaiting_threshold_conf.iter() {
- if entry.txid == *txid {
- height = Some(entry.height);
- break;
- }
- }
-
- if let Some(height) = height {
- self.block_disconnected(height, broadcaster, fee_estimator, logger);
- }
- }
-
- pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
- where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let mut bump_candidates = HashMap::new();
- let onchain_events_awaiting_threshold_conf =
- self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
- for entry in onchain_events_awaiting_threshold_conf {
- if entry.height >= height {
- //- our claim tx on a commitment tx output
- //- resurect outpoint back in its claimable set and regenerate tx
- match entry.event {
- OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
- if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
- if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
- claim_material.per_input_material.insert(outpoint, input_material);
- // Using a HashMap guarantee us than if we have multiple outpoints getting
- // resurrected only one bump claim tx is going to be broadcast
- bump_candidates.insert(ancestor_claimable_txid.clone(), claim_material.clone());
- }
- }
- },
- _ => {},
- }
- } else {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- for (_, claim_material) in bump_candidates.iter_mut() {
- if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &&*fee_estimator, &&*logger) {
- claim_material.height_timer = new_timer;
- claim_material.feerate_previous = new_feerate;
- log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transaction(&bump_tx);
- }
- }
- for (ancestor_claim_txid, claim_material) in bump_candidates.drain() {
- self.pending_claim_requests.insert(ancestor_claim_txid.0, claim_material);
- }
- //TODO: if we implement cross-block aggregated claim transaction we need to refresh set of outpoints and regenerate tx but
- // right now if one of the outpoint get disconnected, just erase whole pending claim request.
- let mut remove_request = Vec::new();
- self.claimable_outpoints.retain(|_, ref v|
- if v.1 >= height {
- remove_request.push(v.0.clone());
- false
- } else { true });
- for req in remove_request {
- self.pending_claim_requests.remove(&req);
- }
- }
-
- pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
- let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
- .iter()
- .map(|entry| entry.txid)
- .collect();
- txids.sort_unstable();
- txids.dedup();
- txids
- }
-
- pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
- self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
- self.holder_htlc_sigs = None;
- }
-
- // Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
- // in some configurations, the holder commitment tx has been signed and broadcast by a
- // ChannelMonitor replica, so we handle that case here.
- fn sign_latest_holder_htlcs(&mut self) {
- if self.holder_htlc_sigs.is_none() {
- let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
- self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
- }
- }
-
- // Normally only the latest commitment tx and HTLCs need to be signed. However, in some
- // configurations we may have updated our holder commitment but a replica of the ChannelMonitor
- // broadcast the previous one before we sync with it. We handle that case here.
- fn sign_prev_holder_htlcs(&mut self) {
- if self.prev_holder_htlc_sigs.is_none() {
- if let Some(ref holder_commitment) = self.prev_holder_commitment {
- let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
- self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
- }
- }
- }
-
- fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
- let mut ret = Vec::new();
- for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
- let tx_idx = htlc.transaction_output_index.unwrap();
- if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
- ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
- }
- ret
- }
-
- //TODO: getting lastest holder transactions should be infallible and result in us "force-closing the channel", but we may
- // have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
- // before providing a initial commitment transaction. For outbound channel, init ChannelMonitor at Channel::funding_signed, there is nothing
- // to monitor before.
- pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
- let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
- self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
- self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
- }
-
- #[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
- pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
- let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
- self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
- self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
- }
-
- pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
- let mut htlc_tx = None;
- let commitment_txid = self.holder_commitment.trust().txid();
- // Check if the HTLC spends from the current holder commitment
- if commitment_txid == outp.txid {
- self.sign_latest_holder_htlcs();
- if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
- let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
- let trusted_tx = self.holder_commitment.trust();
- let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
- htlc_tx = Some(trusted_tx
- .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
- }
- }
- // If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
- if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
- let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
- if commitment_txid == outp.txid {
- self.sign_prev_holder_htlcs();
- if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
- let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
- let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
- let trusted_tx = holder_commitment.trust();
- let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
- htlc_tx = Some(trusted_tx
- .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
- }
- }
- }
- htlc_tx
- }
-
- #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
- pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
- let latest_had_sigs = self.holder_htlc_sigs.is_some();
- let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
- let ret = self.get_fully_signed_htlc_tx(outp, preimage);
- if !latest_had_sigs {
- self.holder_htlc_sigs = None;
- }
- if !prev_had_sigs {
- self.prev_holder_htlc_sigs = None;
- }
- ret
- }
-}