use ln::chan_utils;
use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCType, ChannelTransactionParameters, HolderCommitmentTransaction};
use ln::channelmanager::{BestBlock, HTLCSource};
-use ln::onchaintx::OnchainTxHandler;
-use ln::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use chain;
use chain::WatchedOutput;
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, Sign, KeysInterface};
+use chain::onchaintx::OnchainTxHandler;
+use chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use chain::Filter;
use util::logger::Logger;
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, U48};
use bitcoin::network::constants::Network;
use hex;
use chain::channelmonitor::ChannelMonitor;
- use chain::onchain_utils::{WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC, WEIGHT_REVOKED_OUTPUT};
+ use chain::package::{WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC, WEIGHT_REVOKED_OUTPUT};
use chain::transaction::OutPoint;
use ln::{PaymentPreimage, PaymentHash};
use ln::channelmanager::BestBlock;
- use ln::package;
- use ln::package::InputDescriptors;
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
pub mod channelmonitor;
pub mod transaction;
pub mod keysinterface;
+pub(crate) mod onchaintx;
+pub(crate) mod package;
/// An error when accessing the chain via [`Access`].
#[derive(Clone)]
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! The logic to build claims and bump in-flight transactions until confirmation.
+//!
+//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all the
+//! transaction building, tracking, bumping and notification functions.
+
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::script::Script;
+
+use bitcoin::hash_types::Txid;
+
+use bitcoin::secp256k1::{Secp256k1, Signature};
+use bitcoin::secp256k1;
+
+use ln::msgs::DecodeError;
+use ln::PaymentPreimage;
+use ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
+use chain::chaininterface::{FeeEstimator, BroadcasterInterface};
+use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
+use chain::keysinterface::{Sign, KeysInterface};
+use chain::package::PackageTemplate;
+use chain::package;
+use util::logger::Logger;
+use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
+use util::byte_utils;
+
+use std::collections::HashMap;
+use core::cmp;
+use core::ops::Deref;
+use core::mem::replace;
+
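+// Upper bound used to cap pre-allocations while deserializing, so a corrupted or malicious
+// serialized blob advertising a huge length cannot make us allocate unbounded memory.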
+const MAX_ALLOC_SIZE: usize = 64*1024;
+
+/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
+/// transaction causing it.
+///
+/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
+#[derive(PartialEq)]
+struct OnchainEventEntry {
+ txid: Txid,
+ height: u32,
+ event: OnchainEvent,
+}
+
+impl OnchainEventEntry {
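+ // The height at which the event has been buried under ANTI_REORG_DELAY confirmations
+ // (the block the event was observed in counts as the first confirmation).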
+ fn confirmation_threshold(&self) -> u32 {
+ self.height + ANTI_REORG_DELAY - 1
+ }
+
+ fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
+ height >= self.confirmation_threshold()
+ }
+}
+
+/// When ChannelMonitor discovers some classes of onchain transactions, we may have to take action
+/// on them once they mature to enough confirmations (ANTI_REORG_DELAY)
+#[derive(PartialEq)]
+enum OnchainEvent {
+ /// An outpoint under claim by our own tx; once that tx gets enough confirmations, we remove the
+ /// outpoint from the bump-txn candidate buffer.
+ Claim {
+ claim_request: Txid,
+ },
+ /// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by
+ /// a counterparty tx. In this case, we need to drop the outpoint and regenerate a new claim tx.
+ /// For safety, we keep tracking the outpoint to be sure to resurrect it back into the claim tx if
+ /// a reorg happens.
+ ContentiousOutpoint {
+ package: PackageTemplate,
+ }
+}
+
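+// Serialization format for the cached holder HTLC signatures: a 0u8 encodes None; a 1u8 is
+// followed by a u64 element count and then, per element, a 0u8 (None) or a 1u8 followed by the
+// (index, signature) pair.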
+impl Readable for Option<Vec<Option<(usize, Signature)>>> {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ match Readable::read(reader)? {
+ 0u8 => Ok(None),
+ 1u8 => {
+ let vlen: u64 = Readable::read(reader)?;
+ let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
+ for _ in 0..vlen {
+ ret.push(match Readable::read(reader)? {
+ 0u8 => None,
+ 1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
+ _ => return Err(DecodeError::InvalidValue)
+ });
+ }
+ Ok(Some(ret))
+ },
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &Some(ref vec) => {
+ 1u8.write(writer)?;
+ (vec.len() as u64).write(writer)?;
+ for opt in vec.iter() {
+ match opt {
+ &Some((ref idx, ref sig)) => {
+ 1u8.write(writer)?;
+ (*idx as u64).write(writer)?;
+ sig.write(writer)?;
+ },
+ &None => 0u8.write(writer)?,
+ }
+ }
+ },
+ &None => 0u8.write(writer)?,
+ }
+ Ok(())
+ }
+}
+
+
+/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts them and
+/// does RBF bumping if possible.
+pub struct OnchainTxHandler<ChannelSigner: Sign> {
+ destination_script: Script,
+ holder_commitment: HolderCommitmentTransaction,
+ // holder_htlc_sigs and prev_holder_htlc_sigs are in the same order as they appear in the commitment
+ // transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
+ // the set of HTLCs in the HolderCommitmentTransaction.
+ holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
+ prev_holder_commitment: Option<HolderCommitmentTransaction>,
+ prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
+
+ pub(super) signer: ChannelSigner,
+ pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
+
+ // Used to track claiming requests. If a claim tx doesn't confirm before the height timer expires, we need to
+ // bump it (RBF or CPFP). If an input has been part of an aggregate tx at the first claim attempt, we need to
+ // keep it within another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in
+ // flight for the same set of outpoints. One of the outpoints may be spent by a transaction not issued by us.
+ // That's why at block connection we scan all inputs and, if any of them belongs to a claiming request's set,
+ // we test for set equality between the spending transaction and the claim request. If they are equal, the
+ // transaction was one of our claiming ones and, after a security delay of 6 blocks, we remove the pending
+ // claim request. If not, we need to regenerate a new claim request with the reduced set of still-claimable outpoints.
+ // Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction
+ // generated by us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
+ // Entry is a cache of the elements needed to generate a bumped claiming transaction (see PackageTemplate).
+ #[cfg(test)] // Used in functional_test to verify sanitization
+ pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
+ #[cfg(not(test))]
+ pending_claim_requests: HashMap<Txid, PackageTemplate>,
+
+ // Used to link outpoints claimed in a connected block to a pending claim request.
+ // Key is an outpoint that monitor parsing has detected we have the keys/scripts to claim.
+ // Value is (pending claim request identifier, confirmation_block); the identifier
+ // is the txid of the initial claiming transaction and is immutable until the outpoint is
+ // post-anti-reorg-delay solved, confirmation_block is used to erase the entry if the
+ // block with the output gets disconnected.
+ #[cfg(test)] // Used in functional_test to verify sanitization
+ pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
+ #[cfg(not(test))]
+ claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
+
+ onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
+
+ latest_height: u32,
+
+ pub(super) secp_ctx: Secp256k1<secp256k1::All>,
+}
+
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
+impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
+ pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+
+ self.destination_script.write(writer)?;
+ self.holder_commitment.write(writer)?;
+ self.holder_htlc_sigs.write(writer)?;
+ self.prev_holder_commitment.write(writer)?;
+ self.prev_holder_htlc_sigs.write(writer)?;
+
+ self.channel_transaction_parameters.write(writer)?;
+
+ let mut key_data = VecWriter(Vec::new());
+ self.signer.write(&mut key_data)?;
+ assert!(key_data.0.len() < core::usize::MAX);
+ assert!(key_data.0.len() < core::u32::MAX as usize);
+ (key_data.0.len() as u32).write(writer)?;
+ writer.write_all(&key_data.0[..])?;
+
+ writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
+ for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
+ ancestor_claim_txid.write(writer)?;
+ request.write(writer)?;
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
+ for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
+ outp.write(writer)?;
+ claim_and_height.0.write(writer)?;
+ claim_and_height.1.write(writer)?;
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
+ for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
+ entry.txid.write(writer)?;
+ writer.write_all(&byte_utils::be32_to_array(entry.height))?;
+ match entry.event {
+ OnchainEvent::Claim { ref claim_request } => {
+ writer.write_all(&[0; 1])?;
+ claim_request.write(writer)?;
+ },
+ OnchainEvent::ContentiousOutpoint { ref package } => {
+ writer.write_all(&[1; 1])?;
+ package.write(writer)?;
+ }
+ }
+ }
+ self.latest_height.write(writer)?;
+
+ write_tlv_fields!(writer, {}, {});
+ Ok(())
+ }
+}
+
+impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
+ fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
+ let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
+
+ let destination_script = Readable::read(reader)?;
+
+ let holder_commitment = Readable::read(reader)?;
+ let holder_htlc_sigs = Readable::read(reader)?;
+ let prev_holder_commitment = Readable::read(reader)?;
+ let prev_holder_htlc_sigs = Readable::read(reader)?;
+
+ let channel_parameters = Readable::read(reader)?;
+
+ let keys_len: u32 = Readable::read(reader)?;
+ let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
+ while keys_data.len() != keys_len as usize {
+ // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
+ let mut data = [0; 1024];
+ let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
+ reader.read_exact(read_slice)?;
+ keys_data.extend_from_slice(read_slice);
+ }
+ let signer = keys_manager.read_chan_signer(&keys_data)?;
+
+ let pending_claim_requests_len: u64 = Readable::read(reader)?;
+ let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..pending_claim_requests_len {
+ pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
+ }
+
+ let claimable_outpoints_len: u64 = Readable::read(reader)?;
+ let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..claimable_outpoints_len {
+ let outpoint = Readable::read(reader)?;
+ let ancestor_claim_txid = Readable::read(reader)?;
+ let height = Readable::read(reader)?;
+ claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
+ }
+ let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
+ let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..waiting_threshold_conf_len {
+ let txid = Readable::read(reader)?;
+ let height = Readable::read(reader)?;
+ let event = match <u8 as Readable>::read(reader)? {
+ 0 => {
+ let claim_request = Readable::read(reader)?;
+ OnchainEvent::Claim {
+ claim_request
+ }
+ },
+ 1 => {
+ let package = Readable::read(reader)?;
+ OnchainEvent::ContentiousOutpoint {
+ package
+ }
+ }
+ _ => return Err(DecodeError::InvalidValue),
+ };
+ onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { txid, height, event });
+ }
+ let latest_height = Readable::read(reader)?;
+
+ read_tlv_fields!(reader, {}, {});
+
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
+
+ Ok(OnchainTxHandler {
+ destination_script,
+ holder_commitment,
+ holder_htlc_sigs,
+ prev_holder_commitment,
+ prev_holder_htlc_sigs,
+ signer,
+ channel_transaction_parameters: channel_parameters,
+ claimable_outpoints,
+ pending_claim_requests,
+ onchain_events_awaiting_threshold_conf,
+ latest_height,
+ secp_ctx,
+ })
+ }
+}
+
+impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
+ pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
+ OnchainTxHandler {
+ destination_script,
+ holder_commitment,
+ holder_htlc_sigs: None,
+ prev_holder_commitment: None,
+ prev_holder_htlc_sigs: None,
+ signer,
+ channel_transaction_parameters: channel_parameters,
+ pending_claim_requests: HashMap::new(),
+ claimable_outpoints: HashMap::new(),
+ onchain_events_awaiting_threshold_conf: Vec::new(),
+ latest_height: 0,
+
+ secp_ctx,
+ }
+ }
+
+ /// In LN, claimed outputs are time-sensitive, which means we have to spend them before reaching some timelock
+ /// expiration. Upon in-channel output detection, we generate a first version of a claim tx and associate a
+ /// height timer with it. A height timer is an absolute block height at which we should generate a new bumped
+ /// "version" of the claim tx, to be sure we safely claim outputs before our counterparty can do so. If the
+ /// timelock expires soon, the height timer is scaled down accordingly to increase the bump frequency and so
+ /// increase our chances of success.
+ fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
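+ // Concretely: retry every block when within 3 blocks of expiration, every 3 blocks when
+ // within 15 blocks, and every 15 blocks otherwise.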
+ if timelock_expiration <= current_height + 3 {
+ return current_height + 1
+ } else if timelock_expiration - current_height <= 15 {
+ return current_height + 3
+ }
+ current_height + 15
+ }
+
+ /// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies
+ /// on the assumption that claim transactions get confirmed before timelock expiration (CSV or CLTV, depending on the
+ /// case). In case of high-fee spikes, a claim tx may get stuck in the mempool, so we need to bump its feerate quickly
+ /// using Replace-By-Fee or Child-Pays-For-Parent.
+ /// Panics if there are signing errors, because signing operations in reaction to on-chain events
+ /// are not expected to fail, and if they do, we may lose funds.
+ fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
+ where F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
+
+ // Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
+ // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
+ let new_timer = Some(Self::get_height_timer(height, cached_request.timelock()));
+ let amt = cached_request.package_amount();
+ if cached_request.is_malleable() {
+ let predicted_weight = cached_request.package_weight(&self.destination_script);
+ if let Some((output_value, new_feerate)) = package::compute_output_value(predicted_weight, amt, cached_request.feerate(), fee_estimator, logger) {
+ assert!(new_feerate != 0);
+
+ let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
+ log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
+ assert!(predicted_weight >= transaction.get_weight());
+ return Some((new_timer, new_feerate, transaction))
+ }
+ } else {
+ // Note: Currently, amounts of holder outputs spending witnesses aren't used
+ // as we can't malleate spending package to increase their feerate. This
+ // should change with the remaining anchor output patchset.
+ debug_assert!(amt == 0);
+ if let Some(transaction) = cached_request.finalize_package(self, amt, self.destination_script.clone(), logger) {
+ return Some((None, 0, transaction));
+ }
+ }
+ None
+ }
+
+ /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
+ /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
+ /// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
+ /// if we receive a preimage after force-close.
+ pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, latest_height: Option<u32>, broadcaster: &B, fee_estimator: &F, logger: &L)
+ where B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let height = match latest_height {
+ Some(h) => h,
+ None => self.latest_height,
+ };
+ log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
+ let mut preprocessed_requests = Vec::with_capacity(requests.len());
+ let mut aggregated_request = None;
+
+ // Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
+ // more than CLTV_SHARED_CLAIM_BUFFER blocks above the current height) and they don't
+ // require an immediate nLockTime (aggregable).
+ for req in requests {
+ // Don't claim an outpoint twice; that would be bad for privacy and may uselessly lock a CPFP input for a while
+ if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); } else {
+ log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
+ if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
+ // Don't aggregate if outpoint package timelock is soon or marked as non-aggregable
+ preprocessed_requests.push(req);
+ } else if aggregated_request.is_none() {
+ aggregated_request = Some(req);
+ } else {
+ aggregated_request.as_mut().unwrap().merge_package(req);
+ }
+ }
+ }
+ if let Some(req) = aggregated_request {
+ preprocessed_requests.push(req);
+ }
+
+ // Generate claim transactions and track them to bump if necessary at
+ // height timer expiration (i.e in how many blocks we're going to take action).
+ for mut req in preprocessed_requests {
+ if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) {
+ req.set_timer(new_timer);
+ req.set_feerate(new_feerate);
+ let txid = tx.txid();
+ for k in req.outpoints() {
+ log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
+ self.claimable_outpoints.insert(k.clone(), (txid, height));
+ }
+ self.pending_claim_requests.insert(txid, req);
+ log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
+ broadcaster.broadcast_transaction(&tx);
+ }
+ }
+
+ let mut bump_candidates = HashMap::new();
+ for tx in txn_matched {
+ // Scan all inputs to check whether one of the spent outpoints is of interest to us
+ let mut claimed_outputs_material = Vec::new();
+ for inp in &tx.input {
+ if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
+ // If the outpoint has a claim request pending on it...
+ if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
+ //... we need to verify equality between the transaction outpoints and the claim request
+ // outpoints to know if the transaction is the original claim or a bumped one issued
+ // by us.
+ let mut set_equality = true;
+ if request.outpoints().len() != tx.input.len() {
+ set_equality = false;
+ } else {
+ for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
+ if **claim_inp != tx_inp.previous_output {
+ set_equality = false;
+ }
+ }
+ }
+
+ macro_rules! clean_claim_request_after_safety_delay {
+ () => {
+ let entry = OnchainEventEntry {
+ txid: tx.txid(),
+ height,
+ event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
+ };
+ if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
+ self.onchain_events_awaiting_threshold_conf.push(entry);
+ }
+ }
+ }
+
+ // If this is our transaction (or our counterparty spent all the outputs
+ // before we could anyway, with the same input order as us), wait for
+ // ANTI_REORG_DELAY and clean the RBF tracking map.
+ if set_equality {
+ clean_claim_request_after_safety_delay!();
+ } else { // If not, generate a new claim request with the updated outpoint set
+ let mut at_least_one_drop = false;
+ for input in tx.input.iter() {
+ if let Some(package) = request.split_package(&input.previous_output) {
+ claimed_outputs_material.push(package);
+ at_least_one_drop = true;
+ }
+ // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
+ if request.outpoints().is_empty() {
+ clean_claim_request_after_safety_delay!();
+ }
+ }
+ //TODO: recompute soonest_timelock to avoid wasting a bit on fees
+ if at_least_one_drop {
+ bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
+ }
+ }
+ break; //No need to iterate further, the tx is either ours or theirs
+ } else {
+ panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
+ }
+ }
+ }
+ for package in claimed_outputs_material.drain(..) {
+ let entry = OnchainEventEntry {
+ txid: tx.txid(),
+ height,
+ event: OnchainEvent::ContentiousOutpoint { package },
+ };
+ if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
+ self.onchain_events_awaiting_threshold_conf.push(entry);
+ }
+ }
+ }
+
+ // After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
+ let onchain_events_awaiting_threshold_conf =
+ self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
+ for entry in onchain_events_awaiting_threshold_conf {
+ if entry.has_reached_confirmation_threshold(height) {
+ match entry.event {
+ OnchainEvent::Claim { claim_request } => {
+ // We may remove a whole set of claim outpoints here, as these may have
+ // been aggregated in a single tx and thus claimed atomically
+ if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
+ for outpoint in request.outpoints() {
+ self.claimable_outpoints.remove(&outpoint);
+ }
+ }
+ },
+ OnchainEvent::ContentiousOutpoint { package } => {
+ self.claimable_outpoints.remove(&package.outpoints()[0]);
+ }
+ }
+ } else {
+ self.onchain_events_awaiting_threshold_conf.push(entry);
+ }
+ }
+
+ // Check if any pending claim request must be rescheduled
+ for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
+ if let Some(h) = request.timer() {
+ if height >= h {
+ bump_candidates.insert(*first_claim_txid, (*request).clone());
+ }
+ }
+ }
+
+ // Build, bump and rebroadcast tx accordingly
+ log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
+ for (first_claim_txid, request) in bump_candidates.iter() {
+ if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
+ log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+ broadcaster.broadcast_transaction(&bump_tx);
+ if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
+ request.set_timer(new_timer);
+ request.set_feerate(new_feerate);
+ }
+ }
+ }
+ }
+
+ pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
+ &mut self,
+ txid: &Txid,
+ broadcaster: B,
+ fee_estimator: F,
+ logger: L,
+ ) where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let mut height = None;
+ for entry in self.onchain_events_awaiting_threshold_conf.iter() {
+ if entry.txid == *txid {
+ height = Some(entry.height);
+ break;
+ }
+ }
+
+ if let Some(height) = height {
+ self.block_disconnected(height, broadcaster, fee_estimator, logger);
+ }
+ }
+
+ pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+ where B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let mut bump_candidates = HashMap::new();
+ let onchain_events_awaiting_threshold_conf =
+ self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
+ for entry in onchain_events_awaiting_threshold_conf {
+ if entry.height >= height {
+ //- our claim tx on a commitment tx output
+ //- resurrect the outpoint back into its claimable set and regenerate the tx
+ match entry.event {
+ OnchainEvent::ContentiousOutpoint { package } => {
+ if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
+ if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+ request.merge_package(package);
+ // Using a HashMap guarantees that if multiple outpoints get
+ // resurrected, only one bumped claim tx is going to be broadcast
+ bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
+ }
+ }
+ },
+ _ => {},
+ }
+ } else {
+ self.onchain_events_awaiting_threshold_conf.push(entry);
+ }
+ }
+ for (_, request) in bump_candidates.iter_mut() {
+ if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &&*fee_estimator, &&*logger) {
+ request.set_timer(new_timer);
+ request.set_feerate(new_feerate);
+ log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+ broadcaster.broadcast_transaction(&bump_tx);
+ }
+ }
+ for (ancestor_claim_txid, request) in bump_candidates.drain() {
+ self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
+ }
+ //TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
+ // right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
+ let mut remove_request = Vec::new();
+ self.claimable_outpoints.retain(|_, ref v|
+ if v.1 >= height {
+ remove_request.push(v.0.clone());
+ false
+ } else { true });
+ for req in remove_request {
+ self.pending_claim_requests.remove(&req);
+ }
+ }
+
+ pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
+ let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
+ .iter()
+ .map(|entry| entry.txid)
+ .collect();
+ txids.sort_unstable();
+ txids.dedup();
+ txids
+ }
+
+ pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
+ self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
+ self.holder_htlc_sigs = None;
+ }
+
+ // Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
+ // in some configurations, the holder commitment tx has been signed and broadcast by a
+ // ChannelMonitor replica, so we handle that case here.
+ fn sign_latest_holder_htlcs(&mut self) {
+ if self.holder_htlc_sigs.is_none() {
+ let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
+ self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
+ }
+ }
+
+ // Normally only the latest commitment tx and HTLCs need to be signed. However, in some
+ // configurations we may have updated our holder commitment but a replica of the ChannelMonitor
+ // broadcast the previous one before we sync with it. We handle that case here.
+ fn sign_prev_holder_htlcs(&mut self) {
+ if self.prev_holder_htlc_sigs.is_none() {
+ if let Some(ref holder_commitment) = self.prev_holder_commitment {
+ let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
+ self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
+ }
+ }
+ }
+
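+ // Re-index the holder HTLC signatures by commitment transaction output index, keeping the
+ // original HTLC index alongside each signature so the matching counterparty signature can be
+ // looked up later.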
+ fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
+ let mut ret = Vec::new();
+ for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
+ let tx_idx = htlc.transaction_output_index.unwrap();
+ if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
+ ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
+ }
+ ret
+ }
+
+ //TODO: getting the latest holder transactions should be infallible and result in us "force-closing the channel", but we may
+ // have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
+ // before an initial commitment transaction is provided. For an outbound channel, initialize the ChannelMonitor at Channel::funding_signed;
+ // there is nothing to monitor before then.
+ pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
+ let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
+ self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
+ self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
+ }
+
+ #[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
+ pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
+ let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
+ self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
+ self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
+ }
+
+ pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+ let mut htlc_tx = None;
+ let commitment_txid = self.holder_commitment.trust().txid();
+ // Check if the HTLC spends from the current holder commitment
+ if commitment_txid == outp.txid {
+ self.sign_latest_holder_htlcs();
+ if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
+ let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
+ let trusted_tx = self.holder_commitment.trust();
+ let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
+ htlc_tx = Some(trusted_tx
+ .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
+ }
+ }
+ // If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
+ if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
+ let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
+ if commitment_txid == outp.txid {
+ self.sign_prev_holder_htlcs();
+ if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
+ let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
+ let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
+ let trusted_tx = holder_commitment.trust();
+ let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
+ htlc_tx = Some(trusted_tx
+ .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
+ }
+ }
+ }
+ htlc_tx
+ }
+
+ #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+ pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+ let latest_had_sigs = self.holder_htlc_sigs.is_some();
+ let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
+ let ret = self.get_fully_signed_htlc_tx(outp, preimage);
+ if !latest_had_sigs {
+ self.holder_htlc_sigs = None;
+ }
+ if !prev_had_sigs {
+ self.prev_holder_htlc_sigs = None;
+ }
+ ret
+ }
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Various utilities to assemble claimable outpoints into packages of one or more transactions. Those
+//! packages carry metadata guiding their aggregation and fee-bumping re-scheduling. This file
+//! also includes witness weight computation and fee computation methods.
+
+use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
+use bitcoin::blockdata::transaction::{TxOut,TxIn, Transaction, SigHashType};
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::script::Script;
+
+use bitcoin::hash_types::Txid;
+
+use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+
+use ln::PaymentPreimage;
+use ln::chan_utils::{TxCreationKeys, HTLCOutputInCommitment, HTLC_OUTPUT_IN_COMMITMENT_SIZE};
+use ln::chan_utils;
+use ln::msgs::DecodeError;
+use chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
+use chain::keysinterface::Sign;
+use chain::onchaintx::OnchainTxHandler;
+use util::byte_utils;
+use util::logger::Logger;
+use util::ser::{Readable, Writer, Writeable};
+
+use std::cmp;
+use std::mem;
+use std::ops::Deref;
+
+const MAX_ALLOC_SIZE: usize = 64*1024;
+
+
+// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+pub(crate) const WEIGHT_REVOKED_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 133;
+// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+pub(crate) const WEIGHT_REVOKED_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 139;
+// number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script
+pub(crate) const WEIGHT_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 32 + 1 + 133;
+// number_of_witness_elements + sig_length + counterpartyhtlc_sig + empty_vec_length + empty_vec + witness_script_length + witness_script
+pub(crate) const WEIGHT_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 139;
+// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
+pub(crate) const WEIGHT_REVOKED_OUTPUT: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 77;
+
+/// A struct to describe a revoked output and corresponding information to generate a solving
+/// witness spending a commitment `to_local` output or a second-stage HTLC transaction output.
+///
+/// The CSV delay and pubkeys are used as part of the witnessScript redeeming a balance output; the
+/// amount is used as part of the signature hash, and the revocation secret is used to generate a
+/// satisfying witness.
+#[derive(Clone, PartialEq)]
+pub(crate) struct RevokedOutput {
+ per_commitment_point: PublicKey,
+ counterparty_delayed_payment_base_key: PublicKey,
+ counterparty_htlc_base_key: PublicKey,
+ per_commitment_key: SecretKey,
+ weight: u64,
+ amount: u64,
+ on_counterparty_tx_csv: u16,
+}
+
+impl RevokedOutput {
+ pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16) -> Self {
+ RevokedOutput {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ per_commitment_key,
+ weight: WEIGHT_REVOKED_OUTPUT,
+ amount,
+ on_counterparty_tx_csv
+ }
+ }
+}
+
+impl_writeable!(RevokedOutput, 33*3 + 32 + 8 + 8 + 2, {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ per_commitment_key,
+ weight,
+ amount,
+ on_counterparty_tx_csv
+});
+
+/// A struct to describe a revoked HTLC output (offered or received) and corresponding information
+/// to generate a solving witness.
+///
+/// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a suitable
+/// witnessScript.
+///
+/// The amount is used as part of the signature hash, and the revocation secret is used to generate
+/// a satisfying witness.
+#[derive(Clone, PartialEq)]
+pub(crate) struct RevokedHTLCOutput {
+ per_commitment_point: PublicKey,
+ counterparty_delayed_payment_base_key: PublicKey,
+ counterparty_htlc_base_key: PublicKey,
+ per_commitment_key: SecretKey,
+ weight: u64,
+ amount: u64,
+ htlc: HTLCOutputInCommitment,
+}
+
+impl RevokedHTLCOutput {
+ pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment) -> Self {
+ let weight = if htlc.offered { WEIGHT_REVOKED_OFFERED_HTLC } else { WEIGHT_REVOKED_RECEIVED_HTLC };
+ RevokedHTLCOutput {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ per_commitment_key,
+ weight,
+ amount,
+ htlc
+ }
+ }
+}
+
+impl_writeable!(RevokedHTLCOutput, 33*3 + 32 + 8 + 8 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ per_commitment_key,
+ weight,
+ amount,
+ htlc
+});
+
+/// A struct to describe an HTLC output on a counterparty commitment transaction.
+///
+/// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a suitable
+/// witnessScript.
+///
+/// The preimage is used as part of the witness.
+#[derive(Clone, PartialEq)]
+pub(crate) struct CounterpartyOfferedHTLCOutput {
+ per_commitment_point: PublicKey,
+ counterparty_delayed_payment_base_key: PublicKey,
+ counterparty_htlc_base_key: PublicKey,
+ preimage: PaymentPreimage,
+ htlc: HTLCOutputInCommitment
+}
+
+impl CounterpartyOfferedHTLCOutput {
+ pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment) -> Self {
+ CounterpartyOfferedHTLCOutput {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ preimage,
+ htlc
+ }
+ }
+}
+
+impl_writeable!(CounterpartyOfferedHTLCOutput, 33*3 + 32 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ preimage,
+ htlc
+});
+
+/// A struct to describe an HTLC output on a counterparty commitment transaction.
+///
+/// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a suitable
+/// witnessScript.
+#[derive(Clone, PartialEq)]
+pub(crate) struct CounterpartyReceivedHTLCOutput {
+ per_commitment_point: PublicKey,
+ counterparty_delayed_payment_base_key: PublicKey,
+ counterparty_htlc_base_key: PublicKey,
+ htlc: HTLCOutputInCommitment
+}
+
+impl CounterpartyReceivedHTLCOutput {
+ pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, htlc: HTLCOutputInCommitment) -> Self {
+ CounterpartyReceivedHTLCOutput {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ htlc
+ }
+ }
+}
+
+impl_writeable!(CounterpartyReceivedHTLCOutput, 33*3 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
+ per_commitment_point,
+ counterparty_delayed_payment_base_key,
+ counterparty_htlc_base_key,
+ htlc
+});
+
+/// A struct to describe an HTLC output on the holder commitment transaction.
+///
+/// Whether offered or received, the amount is always used as part of the BIP143 sighash.
+/// The preimage is only included as part of the witness in the received case.
+#[derive(Clone, PartialEq)]
+pub(crate) struct HolderHTLCOutput {
+ preimage: Option<PaymentPreimage>,
+ amount: u64,
+}
+
+impl HolderHTLCOutput {
+ pub(crate) fn build(preimage: Option<PaymentPreimage>, amount: u64) -> Self {
+ HolderHTLCOutput {
+ preimage,
+ amount
+ }
+ }
+}
+
+impl_writeable!(HolderHTLCOutput, 0, {
+ preimage,
+ amount
+});
+
+/// A struct to describe the channel output on the funding transaction.
+///
+/// witnessScript is used as part of the witness redeeming the funding utxo.
+#[derive(Clone, PartialEq)]
+pub(crate) struct HolderFundingOutput {
+ funding_redeemscript: Script,
+}
+
+impl HolderFundingOutput {
+ pub(crate) fn build(funding_redeemscript: Script) -> Self {
+ HolderFundingOutput {
+ funding_redeemscript,
+ }
+ }
+}
+
+impl_writeable!(HolderFundingOutput, 0, {
+ funding_redeemscript
+});
+
+/// A wrapper encapsulating all in-protocol differing output types.
+///
+/// The generic API offers access to an output's common attributes and allows transformations such
+/// as finalizing an input claiming the output.
+#[derive(Clone, PartialEq)]
+pub(crate) enum PackageSolvingData {
+ RevokedOutput(RevokedOutput),
+ RevokedHTLCOutput(RevokedHTLCOutput),
+ CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput),
+ CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput),
+ HolderHTLCOutput(HolderHTLCOutput),
+ HolderFundingOutput(HolderFundingOutput),
+}
+
+impl PackageSolvingData {
+ fn amount(&self) -> u64 {
+ let amt = match self {
+ PackageSolvingData::RevokedOutput(ref outp) => { outp.amount },
+ PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.amount },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
+ // Note: Currently, amounts of holder outputs spending witnesses aren't used
+ // as we can't malleate spending package to increase their feerate. This
+ // should change with the remaining anchor output patchset.
+ PackageSolvingData::HolderHTLCOutput(..) => { 0 },
+ PackageSolvingData::HolderFundingOutput(..) => { 0 },
+ };
+ amt
+ }
+ fn weight(&self) -> usize {
+ let weight = match self {
+ PackageSolvingData::RevokedOutput(ref outp) => { outp.weight as usize },
+ PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.weight as usize },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { WEIGHT_OFFERED_HTLC as usize },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { WEIGHT_RECEIVED_HTLC as usize },
+ // Note: Currently, weights of holder outputs spending witnesses aren't used
+ // as we can't malleate spending package to increase their feerate. This
+ // should change with the remaining anchor output patchset.
+ PackageSolvingData::HolderHTLCOutput(..) => { debug_assert!(false); 0 },
+ PackageSolvingData::HolderFundingOutput(..) => { debug_assert!(false); 0 },
+ };
+ weight
+ }
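+ // Only claims against a revoked commitment may be freely mixed in one package (the revoked
+ // balance output and revoked HTLC outputs); any other claim type can only be aggregated with
+ // inputs of the same variant.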
+ fn is_compatible(&self, input: &PackageSolvingData) -> bool {
+ match self {
+ PackageSolvingData::RevokedOutput(..) => {
+ match input {
+ PackageSolvingData::RevokedHTLCOutput(..) => { true },
+ PackageSolvingData::RevokedOutput(..) => { true },
+ _ => { false }
+ }
+ },
+ PackageSolvingData::RevokedHTLCOutput(..) => {
+ match input {
+ PackageSolvingData::RevokedOutput(..) => { true },
+ PackageSolvingData::RevokedHTLCOutput(..) => { true },
+ _ => { false }
+ }
+ },
+ _ => { mem::discriminant(self) == mem::discriminant(&input) }
+ }
+ }
+ fn finalize_input<Signer: Sign>(&self, bumped_tx: &mut Transaction, i: usize, onchain_handler: &mut OnchainTxHandler<Signer>) -> bool {
+ match self {
+ PackageSolvingData::RevokedOutput(ref outp) => {
+ if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+ let witness_script = chan_utils::get_revokeable_redeemscript(&chan_keys.revocation_key, outp.on_counterparty_tx_csv, &chan_keys.broadcaster_delayed_payment_key);
+ //TODO: should we panic on signer failure ?
+ if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_output(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &onchain_handler.secp_ctx) {
+ bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+ bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+ bumped_tx.input[i].witness.push(vec!(1));
+ bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+ } else { return false; }
+ }
+ },
+ PackageSolvingData::RevokedHTLCOutput(ref outp) => {
+ if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+ let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
+ //TODO: should we panic on signer failure ?
+ if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_htlc(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &outp.htlc, &onchain_handler.secp_ctx) {
+ bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+ bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+ bumped_tx.input[i].witness.push(chan_keys.revocation_key.clone().serialize().to_vec());
+ bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+ } else { return false; }
+ }
+ },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => {
+ if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+ let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
+
+ if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
+ bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+ bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+ bumped_tx.input[i].witness.push(outp.preimage.0.to_vec());
+ bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+ }
+ }
+ },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => {
+ if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
+ let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
+
+ bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transactions; if we do, we should set lock_time earlier to avoid breaking the sighash computation
+ if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
+ bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
+ bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+ // Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
+ bumped_tx.input[i].witness.push(vec![]);
+ bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+ }
+ }
+ },
+ _ => { panic!("API Error!"); }
+ }
+ true
+ }
+ fn get_finalized_tx<Signer: Sign>(&self, outpoint: &BitcoinOutPoint, onchain_handler: &mut OnchainTxHandler<Signer>) -> Option<Transaction> {
+ match self {
+ PackageSolvingData::HolderHTLCOutput(ref outp) => { return onchain_handler.get_fully_signed_htlc_tx(outpoint, &outp.preimage); }
+ PackageSolvingData::HolderFundingOutput(ref outp) => { return Some(onchain_handler.get_fully_signed_holder_tx(&outp.funding_redeemscript)); }
+ _ => { panic!("API Error!"); }
+ }
+ }
+}
+
+impl Writeable for PackageSolvingData {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ PackageSolvingData::RevokedOutput(ref revoked_outp) => {
+ 0u8.write(writer)?;
+ revoked_outp.write(writer)?;
+ },
+ PackageSolvingData::RevokedHTLCOutput(ref revoked_outp) => {
+ 1u8.write(writer)?;
+ revoked_outp.write(writer)?;
+ },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(ref counterparty_outp) => {
+ 2u8.write(writer)?;
+ counterparty_outp.write(writer)?;
+ },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(ref counterparty_outp) => {
+ 3u8.write(writer)?;
+ counterparty_outp.write(writer)?;
+ },
+ PackageSolvingData::HolderHTLCOutput(ref holder_outp) => {
+ 4u8.write(writer)?;
+ holder_outp.write(writer)?;
+ },
+ PackageSolvingData::HolderFundingOutput(ref funding_outp) => {
+ 5u8.write(writer)?;
+ funding_outp.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl Readable for PackageSolvingData {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let byte = <u8 as Readable>::read(reader)?;
+ let solving_data = match byte {
+ 0 => {
+ PackageSolvingData::RevokedOutput(Readable::read(reader)?)
+ },
+ 1 => {
+ PackageSolvingData::RevokedHTLCOutput(Readable::read(reader)?)
+ },
+ 2 => {
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(Readable::read(reader)?)
+ },
+ 3 => {
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(Readable::read(reader)?)
+ },
+ 4 => {
+ PackageSolvingData::HolderHTLCOutput(Readable::read(reader)?)
+ },
+ 5 => {
+ PackageSolvingData::HolderFundingOutput(Readable::read(reader)?)
+ }
+ _ => return Err(DecodeError::UnknownVersion)
+ };
+ Ok(solving_data)
+ }
+}
+
+/// A malleable package might be aggregated with other packages to save on fees.
+/// An untractable package has been counter-signed and aggregating it would break cached
+/// counterparty signatures.
+#[derive(Clone, PartialEq)]
+pub(crate) enum PackageMalleability {
+ Malleable,
+ Untractable,
+}
+
+/// A structure to describe the content of a package, generated by ChannelMonitor and
+/// used by OnchainTxHandler to generate and broadcast transactions settling onchain claims.
+///
+/// A package is defined as one or more transactions claiming onchain outputs in reaction
+/// to confirmation of a channel transaction. Those packages might be aggregated to save on
+/// fees, if satisfaction of the outputs' witnessScripts lets us do so.
+///
+/// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals.
+/// Failing to confirm a package translates into a loss of funds for the user.
+#[derive(Clone, PartialEq)]
+pub struct PackageTemplate {
+ // List of onchain outputs and solving data to generate satisfying witnesses.
+ inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>,
+ // Packages are deemed malleable if we have local knowledge of at least one set of
+ // private keys yielding a satisfying witness. Malleability implies that we can aggregate
+ // packages among themselves to save on fees or rely on RBF to bump their feerates.
+ // Untractable packages have been counter-signed and thus can't be aggregated
+ // without breaking signatures. Their fee-bumping strategy must instead rely on CPFP.
+ malleability: PackageMalleability,
+ // Block height after which the earliest output belonging to this package is mature for a
+ // competing claim by the counterparty. As our chain tip gets nearer to the timelock, the
+ // fee-bumping frequency will increase. See `OnchainTxHandler::get_height_timer`.
+ soonest_conf_deadline: u32,
+ // Determines if this package can be aggregated.
+ // Timelocked outputs belonging to the same transaction might have differing
+ // satisfying heights. Picking the latest height among the output set would be a valid
+ // aggregation strategy but it comes with at least 2 trade-offs:
+ // * earlier-output funds are going to take longer to come back
+ // * the CLTV delta backing up a corresponding HTLC on an upstream channel could be swallowed
+ // by the requirement of the later outputs in the set
+ // For now, we mark such timelocked outputs as non-aggregable, though we might introduce a
+ // smarter aggregation strategy in the future.
+ aggregable: bool,
+ // Cache of the package feerate committed at the previous (re)broadcast. Given bumping resources
+ // (either claimed output value or an external utxo), it will keep increasing until the holder
+ // or counterparty successfully claims.
+ feerate_previous: u64,
+ // Cache of next height at which fee-bumping and rebroadcast will be attempted. In
+ // the future, we might abstract it to an observed mempool fluctuation.
+ height_timer: Option<u32>,
+ // Confirmation height of the transaction containing the claimed set of outputs. If a reorg
+ // reaches it, we wipe out and forget the package.
+ height_original: u32,
+}
+
+impl PackageTemplate {
+ pub(crate) fn is_malleable(&self) -> bool {
+ self.malleability == PackageMalleability::Malleable
+ }
+ pub(crate) fn timelock(&self) -> u32 {
+ self.soonest_conf_deadline
+ }
+ pub(crate) fn aggregable(&self) -> bool {
+ self.aggregable
+ }
+ pub(crate) fn feerate(&self) -> u64 {
+ self.feerate_previous
+ }
+ pub(crate) fn set_feerate(&mut self, new_feerate: u64) {
+ self.feerate_previous = new_feerate;
+ }
+ pub(crate) fn timer(&self) -> Option<u32> {
+ if let Some(ref timer) = self.height_timer {
+ return Some(*timer);
+ }
+ None
+ }
+ pub(crate) fn set_timer(&mut self, new_timer: Option<u32>) {
+ self.height_timer = new_timer;
+ }
+ pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> {
+ self.inputs.iter().map(|(o, _)| o).collect()
+ }
+ pub(crate) fn split_package(&mut self, split_outp: &BitcoinOutPoint) -> Option<PackageTemplate> {
+ match self.malleability {
+ PackageMalleability::Malleable => {
+ let mut split_package = None;
+ let timelock = self.soonest_conf_deadline;
+ let aggregable = self.aggregable;
+ let feerate_previous = self.feerate_previous;
+ let height_timer = self.height_timer;
+ let height_original = self.height_original;
+ self.inputs.retain(|outp| {
+ if *split_outp == outp.0 {
+ split_package = Some(PackageTemplate {
+ inputs: vec![(outp.0, outp.1.clone())],
+ malleability: PackageMalleability::Malleable,
+ soonest_conf_deadline: timelock,
+ aggregable,
+ feerate_previous,
+ height_timer,
+ height_original,
+ });
+ return false;
+ }
+ return true;
+ });
+ return split_package;
+ },
+ _ => {
+ // Note, we may try to split on a remote transaction for
+ // which we don't have a competing one (HTLC-Success before
+ // timelock expiration). This explains why we don't panic!
+ // We should refactor OnchainTxHandler::block_connected to
+ // only test equality on competing claims.
+ return None;
+ }
+ }
+ }
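+ // Illustrative walkthrough (not part of the original comments): if a malleable package tracks
+ // inputs [A, B] and a competing transaction confirms spending only B, `split_package(&B)`
+ // shrinks `self` down to [A] and returns a fresh single-input package for B carrying over the
+ // same deadline, cached feerate, height timer and original height.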
+ pub(crate) fn merge_package(&mut self, mut merge_from: PackageTemplate) {
+ assert_eq!(self.height_original, merge_from.height_original);
+ if self.malleability == PackageMalleability::Untractable || merge_from.malleability == PackageMalleability::Untractable {
+ panic!("Merging template on untractable packages");
+ }
+ if !self.aggregable || !merge_from.aggregable {
+ panic!("Merging non aggregatable packages");
+ }
+ if let Some((_, lead_input)) = self.inputs.first() {
+ for (_, v) in merge_from.inputs.iter() {
+ if !lead_input.is_compatible(v) { panic!("Merging outputs of differing types!"); }
+ }
+ } else { panic!("Merging template on an empty package"); }
+ for (k, v) in merge_from.inputs.drain(..) {
+ self.inputs.push((k, v));
+ }
+ //TODO: verify coverage and sanity?
+ if self.soonest_conf_deadline > merge_from.soonest_conf_deadline {
+ self.soonest_conf_deadline = merge_from.soonest_conf_deadline;
+ }
+ if self.feerate_previous > merge_from.feerate_previous {
+ self.feerate_previous = merge_from.feerate_previous;
+ }
+ self.height_timer = cmp::min(self.height_timer, merge_from.height_timer);
+ }
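+ // Illustrative example (hypothetical numbers): merging a package with deadline 120 and cached
+ // feerate 500 into one with deadline 110 and cached feerate 250 keeps the earlier deadline
+ // (110), the lower cached feerate (250) and the smaller height timer, while the input sets are
+ // concatenated.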
+ pub(crate) fn package_amount(&self) -> u64 {
+ let mut amounts = 0;
+ for (_, outp) in self.inputs.iter() {
+ amounts += outp.amount();
+ }
+ amounts
+ }
+ pub(crate) fn package_weight(&self, destination_script: &Script) -> usize {
+ let mut inputs_weight = 0;
+ let mut witnesses_weight = 2; // segwit marker + flag bytes
+ for (_, outp) in self.inputs.iter() {
+ // previous_out_point: 36 bytes ; var_int: 1 byte ; sequence: 4 bytes
+ inputs_weight += 41 * WITNESS_SCALE_FACTOR;
+ witnesses_weight += outp.weight();
+ }
+ // version: 4 bytes ; count_tx_in: 1 byte ; count_tx_out: 1 byte ; lock_time: 4 bytes
+ let transaction_weight = 10 * WITNESS_SCALE_FACTOR;
+ // value: 8 bytes ; var_int: 1 byte ; pk_script: `destination_script.len()`
+ let output_weight = (8 + 1 + destination_script.len()) * WITNESS_SCALE_FACTOR;
+ inputs_weight + witnesses_weight + transaction_weight + output_weight
+ }
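+ // Illustrative calculation (assuming a single revoked `to_local` input and a 22-byte P2WPKH
+ // destination script): inputs_weight = 41 * 4 = 164, witnesses_weight = 2 + 155
+ // (WEIGHT_REVOKED_OUTPUT), transaction_weight = 10 * 4 = 40 and output_weight =
+ // (8 + 1 + 22) * 4 = 124, for a predicted total of 485 weight units.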
+ pub(crate) fn finalize_package<L: Deref, Signer: Sign>(&self, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64, destination_script: Script, logger: &L) -> Option<Transaction>
+ where L::Target: Logger,
+ {
+ match self.malleability {
+ PackageMalleability::Malleable => {
+ let mut bumped_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: vec![],
+ output: vec![TxOut {
+ script_pubkey: destination_script,
+ value,
+ }],
+ };
+ for (outpoint, _) in self.inputs.iter() {
+ bumped_tx.input.push(TxIn {
+ previous_output: *outpoint,
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ });
+ }
+ for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
+ log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+ if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
+ }
+ log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
+ return Some(bumped_tx);
+ },
+ PackageMalleability::Untractable => {
+ if let Some((outpoint, outp)) = self.inputs.first() {
+ if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
+ log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+ log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
+ return Some(final_tx);
+ }
+ return None;
+ } else { panic!("API Error: Package must not have empty inputs"); }
+ },
+ }
+ }
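+ // In short: for a malleable package, `finalize_package` assembles a single transaction spending
+ // every tracked outpoint to `destination_script` and asks each input's solving data to attach a
+ // satisfying witness; for an untractable package it simply returns the pre-signed transaction
+ // (holder commitment or HTLC transaction) for its single tracked input.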
+ pub(crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, aggregable: bool, height_original: u32) -> Self {
+ let malleability = match input_solving_data {
+ PackageSolvingData::RevokedOutput(..) => { PackageMalleability::Malleable },
+ PackageSolvingData::RevokedHTLCOutput(..) => { PackageMalleability::Malleable },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { PackageMalleability::Malleable },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { PackageMalleability::Malleable },
+ PackageSolvingData::HolderHTLCOutput(..) => { PackageMalleability::Untractable },
+ PackageSolvingData::HolderFundingOutput(..) => { PackageMalleability::Untractable },
+ };
+ let mut inputs = Vec::with_capacity(1);
+ inputs.push((BitcoinOutPoint { txid, vout }, input_solving_data));
+ PackageTemplate {
+ inputs,
+ malleability,
+ soonest_conf_deadline,
+ aggregable,
+ feerate_previous: 0,
+ height_timer: None,
+ height_original,
+ }
+ }
+}
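+// Illustrative usage (hypothetical variable names; the concrete deadline policy lives in
+// channelmonitor.rs): on detecting a revoked `to_local` output in a counterparty commitment
+// transaction confirmed at `height`, a monitor could register a claim roughly as:
+//
+//   let request = PackageTemplate::build_package(
+//       commitment_txid, to_local_vout,
+//       PackageSolvingData::RevokedOutput(revoked_output_descriptor),
+//       claim_deadline, // soonest_conf_deadline, e.g. the earliest competing timelock
+//       true,           // revoked non-HTLC outputs are aggregable
+//       height);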
+
+impl Writeable for PackageTemplate {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ writer.write_all(&byte_utils::be64_to_array(self.inputs.len() as u64))?;
+ for (ref outpoint, ref rev_outp) in self.inputs.iter() {
+ outpoint.write(writer)?;
+ rev_outp.write(writer)?;
+ }
+ self.soonest_conf_deadline.write(writer)?;
+ self.feerate_previous.write(writer)?;
+ self.height_timer.write(writer)?;
+ self.height_original.write(writer)?;
+ Ok(())
+ }
+}
+
+impl Readable for PackageTemplate {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let inputs_count = <u64 as Readable>::read(reader)?;
+ let mut inputs: Vec<(BitcoinOutPoint, PackageSolvingData)> = Vec::with_capacity(cmp::min(inputs_count as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..inputs_count {
+ let outpoint = Readable::read(reader)?;
+ let rev_outp = Readable::read(reader)?;
+ inputs.push((outpoint, rev_outp));
+ }
+ let (malleability, aggregable) = if let Some((_, lead_input)) = inputs.first() {
+ match lead_input {
+ PackageSolvingData::RevokedOutput(..) => { (PackageMalleability::Malleable, true) },
+ PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
+ PackageSolvingData::HolderHTLCOutput(..) => { (PackageMalleability::Untractable, false) },
+ PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
+ }
+ } else { return Err(DecodeError::InvalidValue); };
+ let soonest_conf_deadline = Readable::read(reader)?;
+ let feerate_previous = Readable::read(reader)?;
+ let height_timer = Readable::read(reader)?;
+ let height_original = Readable::read(reader)?;
+ Ok(PackageTemplate {
+ inputs,
+ malleability,
+ soonest_conf_deadline,
+ aggregable,
+ feerate_previous,
+ height_timer,
+ height_original,
+ })
+ }
+}
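+// Note that `malleability` and `aggregable` are not serialized: `read` re-derives them from the
+// first input's `PackageSolvingData` variant (and rejects empty packages with `InvalidValue`).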
+
+/// Attempt to propose a bumping fee for a transaction from its spent outputs' values and predicted
+/// weight. We start with the highest-priority feerate returned by the node's fee estimator and then
+/// fall back to lower priorities until the resulting fee fits within the value available to spend.
+///
+/// If the proposed fee is less than the available spent outputs' value, we return the proposed
+/// fee and the corresponding updated feerate. If the proposed fee is equal to or greater than the
+/// available spent outputs' value, we return nothing.
+fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: usize, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
+ where F::Target: FeeEstimator,
+ L::Target: Logger,
+{
+ let mut updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64;
+ let mut fee = updated_feerate * (predicted_weight as u64) / 1000;
+ if input_amounts <= fee {
+ updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal) as u64;
+ fee = updated_feerate * (predicted_weight as u64) / 1000;
+ if input_amounts <= fee {
+ updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) as u64;
+ fee = updated_feerate * (predicted_weight as u64) / 1000;
+ if input_amounts <= fee {
+ log_error!(logger, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
+ fee, input_amounts);
+ None
+ } else {
+ log_warn!(logger, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
+ input_amounts);
+ Some((fee, updated_feerate))
+ }
+ } else {
+ log_warn!(logger, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
+ input_amounts);
+ Some((fee, updated_feerate))
+ }
+ } else {
+ Some((fee, updated_feerate))
+ }
+}
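+// Worked example (illustrative numbers): with input_amounts = 10_000 sats, predicted_weight =
+// 600 WU and estimator feerates of 20_000 / 5_000 / 1_000 sats per 1000 WU for HighPriority /
+// Normal / Background, the high-priority fee would be 20_000 * 600 / 1000 = 12_000 sats, which
+// exceeds the claimable value, so we fall back to Normal: 5_000 * 600 / 1000 = 3_000 sats and
+// return Some((3_000, 5_000)), logging a warning about the downgrade.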
+
+/// Attempt to propose a bumping fee for a transaction from its spent outputs' values and predicted
+/// weight. If the feerates proposed by the fee estimator have been increasing since the last
+/// fee-bumping attempt, use them. Otherwise, blindly bump the feerate by 25% of the previous
+/// feerate. We also verify that those bumping heuristics respect BIP125 rules 3) and 4), and if
+/// required adjust the new fee to meet the RBF policy requirement.
+fn feerate_bump<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64, previous_feerate: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
+ where F::Target: FeeEstimator,
+ L::Target: Logger,
+{
+ // If the old feerate is lower than the current one returned by the fee estimator, use the
+ // new estimate to compute the new fee...
+ let new_fee = if let Some((new_fee, _)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
+ let updated_feerate = new_fee * 1000 / (predicted_weight as u64);
+ if updated_feerate > previous_feerate {
+ new_fee
+ } else {
+ // ...else just increase the previous feerate by 25% (because that's a nice number)
+ let new_fee = previous_feerate * (predicted_weight as u64) / 750;
+ if input_amounts <= new_fee {
+ log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
+ return None;
+ }
+ new_fee
+ }
+ } else {
+ log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
+ return None;
+ };
+
+ let previous_fee = previous_feerate * (predicted_weight as u64) / 1000;
+ let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * (predicted_weight as u64) / 1000;
+ // BIP 125 Opt-in Full Replace-by-Fee Signaling
+ // * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
+ // * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
+ let new_fee = if new_fee < previous_fee + min_relay_fee {
+ previous_fee + min_relay_fee
+ } else {
+ new_fee
+ };
+ Some((new_fee, new_fee * 1000 / (predicted_weight as u64)))
+}
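+// Worked example of the BIP125 floor (illustrative numbers, treating the minimum relay feerate as
+// 4_000 sats per 1000 WU): with predicted_weight = 600 WU and previous_feerate = 10_000, we get
+// previous_fee = 6_000 sats and min_relay_fee = 2_400 sats, so any proposed fee below 8_400 sats
+// is raised to 8_400 so the replacement both out-bids the original and pays for its own relay.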
+
+/// Deduce a new proposed fee from the claiming transaction's output value.
+/// If the new proposed fee is greater than the consumed outpoints' value, burn everything to
+/// miner fees to deter a counterparty attacker.
+pub(crate) fn compute_output_value<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64, previous_feerate: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
+ where F::Target: FeeEstimator,
+ L::Target: Logger,
+{
+ // If the old feerate is 0, this is the first fee-bump iteration for this claim, so use the normal fee calculation
+ if previous_feerate != 0 {
+ if let Some((new_fee, feerate)) = feerate_bump(predicted_weight, input_amounts, previous_feerate, fee_estimator, logger) {
+ // If the newly computed fee exceeds the whole claimable amount, burn everything to fees
+ if new_fee > input_amounts {
+ return Some((0, feerate));
+ } else {
+ return Some((input_amounts - new_fee, feerate));
+ }
+ }
+ } else {
+ if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
+ return Some((input_amounts - new_fee, feerate));
+ }
+ }
+ None
+}
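+
+// A minimal sanity-check sketch (not part of the original change set) of the claim-value
+// arithmetic above; the numbers are illustrative and the relay feerate is a stand-in constant.
+#[cfg(test)]
+mod package_fee_math_sanity {
+ #[test]
+ fn bip125_floor_and_burn_behaviour() {
+ // Mirror of the BIP125 rule 3/4 floor applied in `feerate_bump`.
+ let predicted_weight: u64 = 600;
+ let previous_feerate: u64 = 10_000; // sats per 1000 WU
+ let min_relay_feerate: u64 = 4_000; // stand-in for MIN_RELAY_FEE_SAT_PER_1000_WEIGHT
+ let previous_fee = previous_feerate * predicted_weight / 1000;
+ let min_relay_fee = min_relay_feerate * predicted_weight / 1000;
+ let proposed_fee: u64 = 7_000; // below the floor, so it must be raised
+ let adjusted_fee = if proposed_fee < previous_fee + min_relay_fee { previous_fee + min_relay_fee } else { proposed_fee };
+ assert_eq!(adjusted_fee, 8_400);
+
+ // Mirror of the burn-everything branch in `compute_output_value`: if the required fee
+ // exceeds the claimable amount, the claim output value collapses to zero.
+ let input_amounts: u64 = 5_000;
+ let output_value = if adjusted_fee > input_amounts { 0 } else { input_amounts - adjusted_fee };
+ assert_eq!(output_value, 0);
+ }
+}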
pub mod peer_handler;
pub mod chan_utils;
pub mod features;
-pub mod onchaintx;
-pub mod package;
#[cfg(feature = "fuzztarget")]
pub mod peer_channel_encryptor;
+++ /dev/null
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-//! The logic to build claims and bump in-flight transactions until confirmations.
-//!
-//! OnchainTxHandler objects are fully-part of ChannelMonitor and encapsulates all
-//! building, tracking, bumping and notifications functions.
-
-use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
-use bitcoin::blockdata::script::Script;
-
-use bitcoin::hash_types::Txid;
-
-use bitcoin::secp256k1::{Secp256k1, Signature};
-use bitcoin::secp256k1;
-
-use ln::msgs::DecodeError;
-use ln::PaymentPreimage;
-use ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
-use ln::package;
-use ln::package::PackageTemplate;
-use chain::chaininterface::{FeeEstimator, BroadcasterInterface};
-use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
-use chain::keysinterface::{Sign, KeysInterface};
-use util::logger::Logger;
-use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
-use util::byte_utils;
-
-use std::collections::HashMap;
-use core::cmp;
-use core::ops::Deref;
-use core::mem::replace;
-
-const MAX_ALLOC_SIZE: usize = 64*1024;
-
-/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
-/// transaction causing it.
-///
-/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
-#[derive(PartialEq)]
-struct OnchainEventEntry {
- txid: Txid,
- height: u32,
- event: OnchainEvent,
-}
-
-impl OnchainEventEntry {
- fn confirmation_threshold(&self) -> u32 {
- self.height + ANTI_REORG_DELAY - 1
- }
-
- fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
- height >= self.confirmation_threshold()
- }
-}
-
-/// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
-/// once they mature to enough confirmations (ANTI_REORG_DELAY)
-#[derive(PartialEq)]
-enum OnchainEvent {
- /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
- /// bump-txn candidate buffer.
- Claim {
- claim_request: Txid,
- },
- /// Claim tx aggregate multiple claimable outpoints. One of the outpoint may be claimed by a counterparty party tx.
- /// In this case, we need to drop the outpoint and regenerate a new claim tx. By safety, we keep tracking
- /// the outpoint to be sure to resurect it back to the claim tx if reorgs happen.
- ContentiousOutpoint {
- package: PackageTemplate,
- }
-}
-
-impl Readable for Option<Vec<Option<(usize, Signature)>>> {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
- match Readable::read(reader)? {
- 0u8 => Ok(None),
- 1u8 => {
- let vlen: u64 = Readable::read(reader)?;
- let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
- for _ in 0..vlen {
- ret.push(match Readable::read(reader)? {
- 0u8 => None,
- 1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
- _ => return Err(DecodeError::InvalidValue)
- });
- }
- Ok(Some(ret))
- },
- _ => Err(DecodeError::InvalidValue),
- }
- }
-}
-
-impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match self {
- &Some(ref vec) => {
- 1u8.write(writer)?;
- (vec.len() as u64).write(writer)?;
- for opt in vec.iter() {
- match opt {
- &Some((ref idx, ref sig)) => {
- 1u8.write(writer)?;
- (*idx as u64).write(writer)?;
- sig.write(writer)?;
- },
- &None => 0u8.write(writer)?,
- }
- }
- },
- &None => 0u8.write(writer)?,
- }
- Ok(())
- }
-}
-
-
-/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
-/// do RBF bumping if possible.
-pub struct OnchainTxHandler<ChannelSigner: Sign> {
- destination_script: Script,
- holder_commitment: HolderCommitmentTransaction,
- // holder_htlc_sigs and prev_holder_htlc_sigs are in the order as they appear in the commitment
- // transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
- // the set of HTLCs in the HolderCommitmentTransaction.
- holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
- prev_holder_commitment: Option<HolderCommitmentTransaction>,
- prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
-
- pub(super) signer: ChannelSigner,
- pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
-
- // Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
- // it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
- // another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in the flight for the
- // same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
- // block connection we scan all inputs and if any of them is among a set of a claiming request we test for set
- // equality between spending transaction and claim request. If true, it means transaction was one our claiming one
- // after a security delay of 6 blocks we remove pending claim request. If false, it means transaction wasn't and
- // we need to regenerate new claim request with reduced set of still-claimable outpoints.
- // Key is identifier of the pending claim request, i.e the txid of the initial claiming transaction generated by
- // us and is immutable until all outpoint of the claimable set are post-anti-reorg-delay solved.
- // Entry is cache of elements need to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
- #[cfg(test)] // Used in functional_test to verify sanitization
- pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
- #[cfg(not(test))]
- pending_claim_requests: HashMap<Txid, PackageTemplate>,
-
- // Used to link outpoints claimed in a connected block to a pending claim request.
- // Key is outpoint than monitor parsing has detected we have keys/scripts to claim
- // Value is (pending claim request identifier, confirmation_block), identifier
- // is txid of the initial claiming transaction and is immutable until outpoint is
- // post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if
- // block with output gets disconnected.
- #[cfg(test)] // Used in functional_test to verify sanitization
- pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
- #[cfg(not(test))]
- claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
-
- onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
-
- latest_height: u32,
-
- pub(super) secp_ctx: Secp256k1<secp256k1::All>,
-}
-
-const SERIALIZATION_VERSION: u8 = 1;
-const MIN_SERIALIZATION_VERSION: u8 = 1;
-
-impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
- pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
-
- self.destination_script.write(writer)?;
- self.holder_commitment.write(writer)?;
- self.holder_htlc_sigs.write(writer)?;
- self.prev_holder_commitment.write(writer)?;
- self.prev_holder_htlc_sigs.write(writer)?;
-
- self.channel_transaction_parameters.write(writer)?;
-
- let mut key_data = VecWriter(Vec::new());
- self.signer.write(&mut key_data)?;
- assert!(key_data.0.len() < core::usize::MAX);
- assert!(key_data.0.len() < core::u32::MAX as usize);
- (key_data.0.len() as u32).write(writer)?;
- writer.write_all(&key_data.0[..])?;
-
- writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
- for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
- ancestor_claim_txid.write(writer)?;
- request.write(writer)?;
- }
-
- writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
- for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
- outp.write(writer)?;
- claim_and_height.0.write(writer)?;
- claim_and_height.1.write(writer)?;
- }
-
- writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
- for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
- entry.txid.write(writer)?;
- writer.write_all(&byte_utils::be32_to_array(entry.height))?;
- match entry.event {
- OnchainEvent::Claim { ref claim_request } => {
- writer.write_all(&[0; 1])?;
- claim_request.write(writer)?;
- },
- OnchainEvent::ContentiousOutpoint { ref package } => {
- writer.write_all(&[1; 1])?;
- package.write(writer)?;
- }
- }
- }
- self.latest_height.write(writer)?;
-
- write_tlv_fields!(writer, {}, {});
- Ok(())
- }
-}
-
-impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
- fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
- let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
-
- let destination_script = Readable::read(reader)?;
-
- let holder_commitment = Readable::read(reader)?;
- let holder_htlc_sigs = Readable::read(reader)?;
- let prev_holder_commitment = Readable::read(reader)?;
- let prev_holder_htlc_sigs = Readable::read(reader)?;
-
- let channel_parameters = Readable::read(reader)?;
-
- let keys_len: u32 = Readable::read(reader)?;
- let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
- while keys_data.len() != keys_len as usize {
- // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
- let mut data = [0; 1024];
- let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
- reader.read_exact(read_slice)?;
- keys_data.extend_from_slice(read_slice);
- }
- let signer = keys_manager.read_chan_signer(&keys_data)?;
-
- let pending_claim_requests_len: u64 = Readable::read(reader)?;
- let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..pending_claim_requests_len {
- pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
- }
-
- let claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..claimable_outpoints_len {
- let outpoint = Readable::read(reader)?;
- let ancestor_claim_txid = Readable::read(reader)?;
- let height = Readable::read(reader)?;
- claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
- }
- let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
- let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..waiting_threshold_conf_len {
- let txid = Readable::read(reader)?;
- let height = Readable::read(reader)?;
- let event = match <u8 as Readable>::read(reader)? {
- 0 => {
- let claim_request = Readable::read(reader)?;
- OnchainEvent::Claim {
- claim_request
- }
- },
- 1 => {
- let package = Readable::read(reader)?;
- OnchainEvent::ContentiousOutpoint {
- package
- }
- }
- _ => return Err(DecodeError::InvalidValue),
- };
- onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { txid, height, event });
- }
- let latest_height = Readable::read(reader)?;
-
- read_tlv_fields!(reader, {}, {});
-
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
-
- Ok(OnchainTxHandler {
- destination_script,
- holder_commitment,
- holder_htlc_sigs,
- prev_holder_commitment,
- prev_holder_htlc_sigs,
- signer,
- channel_transaction_parameters: channel_parameters,
- claimable_outpoints,
- pending_claim_requests,
- onchain_events_awaiting_threshold_conf,
- latest_height,
- secp_ctx,
- })
- }
-}
-
-impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
- pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
- OnchainTxHandler {
- destination_script,
- holder_commitment,
- holder_htlc_sigs: None,
- prev_holder_commitment: None,
- prev_holder_htlc_sigs: None,
- signer,
- channel_transaction_parameters: channel_parameters,
- pending_claim_requests: HashMap::new(),
- claimable_outpoints: HashMap::new(),
- onchain_events_awaiting_threshold_conf: Vec::new(),
- latest_height: 0,
-
- secp_ctx,
- }
- }
-
- /// In LN, output claimed are time-sensitive, which means we have to spend them before reaching some timelock expiration. At in-channel
- /// output detection, we generate a first version of a claim tx and associate to it a height timer. A height timer is an absolute block
- /// height than once reached we should generate a new bumped "version" of the claim tx to be sure than we safely claim outputs before
- /// than our counterparty can do it too. If timelock expires soon, height timer is going to be scale down in consequence to increase
- /// frequency of the bump and so increase our bets of success.
- fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
- if timelock_expiration <= current_height + 3 {
- return current_height + 1
- } else if timelock_expiration - current_height <= 15 {
- return current_height + 3
- }
- current_height + 15
- }
-
- /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize coutnerparty onchain) lays on the assumption of claim transactions getting confirmed before timelock expiration
- /// (CSV or CLTV following cases). In case of high-fee spikes, claim tx may stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pay-For-Parent.
- /// Panics if there are signing errors, because signing operations in reaction to on-chain events
- /// are not expected to fail, and if they do, we may lose funds.
- fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
- where F::Target: FeeEstimator,
- L::Target: Logger,
- {
- if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
-
- // Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
- // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
- let new_timer = Some(Self::get_height_timer(height, cached_request.timelock()));
- let amt = cached_request.package_amount();
- if cached_request.is_malleable() {
- let predicted_weight = cached_request.package_weight(&self.destination_script);
- if let Some((output_value, new_feerate)) = package::compute_output_value(predicted_weight, amt, cached_request.feerate(), fee_estimator, logger) {
- assert!(new_feerate != 0);
-
- let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
- log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
- assert!(predicted_weight >= transaction.get_weight());
- return Some((new_timer, new_feerate, transaction))
- }
- } else {
- // Note: Currently, amounts of holder outputs spending witnesses aren't used
- // as we can't malleate spending package to increase their feerate. This
- // should change with the remaining anchor output patchset.
- debug_assert!(amt == 0);
- if let Some(transaction) = cached_request.finalize_package(self, amt, self.destination_script.clone(), logger) {
- return Some((None, 0, transaction));
- }
- }
- None
- }
-
- /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
- /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
- /// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
- /// if we receive a preimage after force-close.
- pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, latest_height: Option<u32>, broadcaster: &B, fee_estimator: &F, logger: &L)
- where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let height = match latest_height {
- Some(h) => h,
- None => self.latest_height,
- };
- log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
- let mut preprocessed_requests = Vec::with_capacity(requests.len());
- let mut aggregated_request = None;
-
- // Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
- // <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
- for req in requests {
- // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
- if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); } else {
- log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
- if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
- // Don't aggregate if outpoint package timelock is soon or marked as non-aggregable
- preprocessed_requests.push(req);
- } else if aggregated_request.is_none() {
- aggregated_request = Some(req);
- } else {
- aggregated_request.as_mut().unwrap().merge_package(req);
- }
- }
- }
- if let Some(req) = aggregated_request {
- preprocessed_requests.push(req);
- }
-
- // Generate claim transactions and track them to bump if necessary at
- // height timer expiration (i.e in how many blocks we're going to take action).
- for mut req in preprocessed_requests {
- if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) {
- req.set_timer(new_timer);
- req.set_feerate(new_feerate);
- let txid = tx.txid();
- for k in req.outpoints() {
- log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
- self.claimable_outpoints.insert(k.clone(), (txid, height));
- }
- self.pending_claim_requests.insert(txid, req);
- log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
- broadcaster.broadcast_transaction(&tx);
- }
- }
-
- let mut bump_candidates = HashMap::new();
- for tx in txn_matched {
- // Scan all input to verify is one of the outpoint spent is of interest for us
- let mut claimed_outputs_material = Vec::new();
- for inp in &tx.input {
- if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
- // If outpoint has claim request pending on it...
- if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
- //... we need to verify equality between transaction outpoints and claim request
- // outpoints to know if transaction is the original claim or a bumped one issued
- // by us.
- let mut set_equality = true;
- if request.outpoints().len() != tx.input.len() {
- set_equality = false;
- } else {
- for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
- if **claim_inp != tx_inp.previous_output {
- set_equality = false;
- }
- }
- }
-
- macro_rules! clean_claim_request_after_safety_delay {
- () => {
- let entry = OnchainEventEntry {
- txid: tx.txid(),
- height,
- event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
- };
- if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- }
-
- // If this is our transaction (or our counterparty spent all the outputs
- // before we could anyway with same inputs order than us), wait for
- // ANTI_REORG_DELAY and clean the RBF tracking map.
- if set_equality {
- clean_claim_request_after_safety_delay!();
- } else { // If false, generate new claim request with update outpoint set
- let mut at_least_one_drop = false;
- for input in tx.input.iter() {
- if let Some(package) = request.split_package(&input.previous_output) {
- claimed_outputs_material.push(package);
- at_least_one_drop = true;
- }
- // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
- if request.outpoints().is_empty() {
- clean_claim_request_after_safety_delay!();
- }
- }
- //TODO: recompute soonest_timelock to avoid wasting a bit on fees
- if at_least_one_drop {
- bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
- }
- }
- break; //No need to iterate further, either tx is our or their
- } else {
- panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
- }
- }
- }
- for package in claimed_outputs_material.drain(..) {
- let entry = OnchainEventEntry {
- txid: tx.txid(),
- height,
- event: OnchainEvent::ContentiousOutpoint { package },
- };
- if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- }
-
- // After security delay, either our claim tx got enough confs or outpoint is definetely out of reach
- let onchain_events_awaiting_threshold_conf =
- self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
- for entry in onchain_events_awaiting_threshold_conf {
- if entry.has_reached_confirmation_threshold(height) {
- match entry.event {
- OnchainEvent::Claim { claim_request } => {
- // We may remove a whole set of claim outpoints here, as these one may have
- // been aggregated in a single tx and claimed so atomically
- if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
- for outpoint in request.outpoints() {
- self.claimable_outpoints.remove(&outpoint);
- }
- }
- },
- OnchainEvent::ContentiousOutpoint { package } => {
- self.claimable_outpoints.remove(&package.outpoints()[0]);
- }
- }
- } else {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
-
- // Check if any pending claim request must be rescheduled
- for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
- if let Some(h) = request.timer() {
- if height >= h {
- bump_candidates.insert(*first_claim_txid, (*request).clone());
- }
- }
- }
-
- // Build, bump and rebroadcast tx accordingly
- log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
- for (first_claim_txid, request) in bump_candidates.iter() {
- if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
- log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transaction(&bump_tx);
- if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
- request.set_timer(new_timer);
- request.set_feerate(new_feerate);
- }
- }
- }
- }
-
- pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
- &mut self,
- txid: &Txid,
- broadcaster: B,
- fee_estimator: F,
- logger: L,
- ) where
- B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let mut height = None;
- for entry in self.onchain_events_awaiting_threshold_conf.iter() {
- if entry.txid == *txid {
- height = Some(entry.height);
- break;
- }
- }
-
- if let Some(height) = height {
- self.block_disconnected(height, broadcaster, fee_estimator, logger);
- }
- }
-
- pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
- where B::Target: BroadcasterInterface,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let mut bump_candidates = HashMap::new();
- let onchain_events_awaiting_threshold_conf =
- self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
- for entry in onchain_events_awaiting_threshold_conf {
- if entry.height >= height {
- //- our claim tx on a commitment tx output
- //- resurect outpoint back in its claimable set and regenerate tx
- match entry.event {
- OnchainEvent::ContentiousOutpoint { package } => {
- if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
- if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
- request.merge_package(package);
- // Using a HashMap guarantee us than if we have multiple outpoints getting
- // resurrected only one bump claim tx is going to be broadcast
- bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
- }
- }
- },
- _ => {},
- }
- } else {
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- for (_, request) in bump_candidates.iter_mut() {
- if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &&*fee_estimator, &&*logger) {
- request.set_timer(new_timer);
- request.set_feerate(new_feerate);
- log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transaction(&bump_tx);
- }
- }
- for (ancestor_claim_txid, request) in bump_candidates.drain() {
- self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
- }
- //TODO: if we implement cross-block aggregated claim transaction we need to refresh set of outpoints and regenerate tx but
- // right now if one of the outpoint get disconnected, just erase whole pending claim request.
- let mut remove_request = Vec::new();
- self.claimable_outpoints.retain(|_, ref v|
- if v.1 >= height {
- remove_request.push(v.0.clone());
- false
- } else { true });
- for req in remove_request {
- self.pending_claim_requests.remove(&req);
- }
- }
-
- pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
- let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
- .iter()
- .map(|entry| entry.txid)
- .collect();
- txids.sort_unstable();
- txids.dedup();
- txids
- }
-
- pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
- self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
- self.holder_htlc_sigs = None;
- }
-
- // Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
- // in some configurations, the holder commitment tx has been signed and broadcast by a
- // ChannelMonitor replica, so we handle that case here.
- fn sign_latest_holder_htlcs(&mut self) {
- if self.holder_htlc_sigs.is_none() {
- let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
- self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
- }
- }
-
- // Normally only the latest commitment tx and HTLCs need to be signed. However, in some
- // configurations we may have updated our holder commitment but a replica of the ChannelMonitor
- // broadcast the previous one before we sync with it. We handle that case here.
- fn sign_prev_holder_htlcs(&mut self) {
- if self.prev_holder_htlc_sigs.is_none() {
- if let Some(ref holder_commitment) = self.prev_holder_commitment {
- let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
- self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
- }
- }
- }
-
- fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
- let mut ret = Vec::new();
- for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
- let tx_idx = htlc.transaction_output_index.unwrap();
- if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
- ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
- }
- ret
- }
-
- //TODO: getting lastest holder transactions should be infallible and result in us "force-closing the channel", but we may
- // have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
- // before providing a initial commitment transaction. For outbound channel, init ChannelMonitor at Channel::funding_signed, there is nothing
- // to monitor before.
- pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
- let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
- self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
- self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
- }
-
- #[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
- pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
- let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
- self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
- self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
- }
-
- pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
- let mut htlc_tx = None;
- let commitment_txid = self.holder_commitment.trust().txid();
- // Check if the HTLC spends from the current holder commitment
- if commitment_txid == outp.txid {
- self.sign_latest_holder_htlcs();
- if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
- let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
- let trusted_tx = self.holder_commitment.trust();
- let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
- htlc_tx = Some(trusted_tx
- .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
- }
- }
- // If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
- if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
- let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
- if commitment_txid == outp.txid {
- self.sign_prev_holder_htlcs();
- if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
- let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
- let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
- let trusted_tx = holder_commitment.trust();
- let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
- htlc_tx = Some(trusted_tx
- .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
- }
- }
- }
- htlc_tx
- }
-
- #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
- pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
- let latest_had_sigs = self.holder_htlc_sigs.is_some();
- let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
- let ret = self.get_fully_signed_htlc_tx(outp, preimage);
- if !latest_had_sigs {
- self.holder_htlc_sigs = None;
- }
- if !prev_had_sigs {
- self.prev_holder_htlc_sigs = None;
- }
- ret
- }
-}
+++ /dev/null
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-//! Various utilities to assemble claimable outpoints in package of one or more transactions. Those
-//! packages are attached metadata, guiding their aggregable or fee-bumping re-schedule. This file
-//! also includes witness weight computation and fee computation methods.
-
-use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
-use bitcoin::blockdata::transaction::{TxOut,TxIn, Transaction, SigHashType};
-use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
-use bitcoin::blockdata::script::Script;
-
-use bitcoin::hash_types::Txid;
-
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
-
-use ln::PaymentPreimage;
-use ln::chan_utils::{TxCreationKeys, HTLCOutputInCommitment, HTLC_OUTPUT_IN_COMMITMENT_SIZE};
-use ln::chan_utils;
-use ln::msgs::DecodeError;
-use ln::onchaintx::OnchainTxHandler;
-use chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
-use chain::keysinterface::Sign;
-use util::byte_utils;
-use util::logger::Logger;
-use util::ser::{Readable, Writer, Writeable};
-
-use std::cmp;
-use std::mem;
-use std::ops::Deref;
-
-const MAX_ALLOC_SIZE: usize = 64*1024;
-
-
-// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
-pub(crate) const WEIGHT_REVOKED_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 133;
-// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
-pub(crate) const WEIGHT_REVOKED_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 139;
-// number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script
-pub(crate) const WEIGHT_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 32 + 1 + 133;
-// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
-pub(crate) const WEIGHT_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 139;
-// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
-pub(crate) const WEIGHT_REVOKED_OUTPUT: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 77;
-
-/// A struct to describe a revoked output and corresponding information to generate a solving
-/// witness spending a commitment `to_local` output or a second-stage HTLC transaction output.
-///
-/// CSV and pubkeys are used as part of a witnessScript redeeming a balance output, amount is used
-/// as part of the signature hash and revocation secret to generate a satisfying witness.
-#[derive(Clone, PartialEq)]
-pub(crate) struct RevokedOutput {
- per_commitment_point: PublicKey,
- counterparty_delayed_payment_base_key: PublicKey,
- counterparty_htlc_base_key: PublicKey,
- per_commitment_key: SecretKey,
- weight: u64,
- amount: u64,
- on_counterparty_tx_csv: u16,
-}
-
-impl RevokedOutput {
- pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16) -> Self {
- RevokedOutput {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- per_commitment_key,
- weight: WEIGHT_REVOKED_OUTPUT,
- amount,
- on_counterparty_tx_csv
- }
- }
-}
-
-impl_writeable!(RevokedOutput, 33*3 + 32 + 8 + 8 + 2, {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- per_commitment_key,
- weight,
- amount,
- on_counterparty_tx_csv
-});
-
-/// A struct to describe a revoked offered output and corresponding information to generate a
-/// solving witness.
-///
-/// HTLCOuputInCommitment (hash timelock, direction) and pubkeys are used to generate a suitable
-/// witnessScript.
-///
-/// CSV is used as part of a witnessScript redeeming a balance output, amount is used as part
-/// of the signature hash and revocation secret to generate a satisfying witness.
-#[derive(Clone, PartialEq)]
-pub(crate) struct RevokedHTLCOutput {
- per_commitment_point: PublicKey,
- counterparty_delayed_payment_base_key: PublicKey,
- counterparty_htlc_base_key: PublicKey,
- per_commitment_key: SecretKey,
- weight: u64,
- amount: u64,
- htlc: HTLCOutputInCommitment,
-}
-
-impl RevokedHTLCOutput {
- pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment) -> Self {
- let weight = if htlc.offered { WEIGHT_REVOKED_OFFERED_HTLC } else { WEIGHT_REVOKED_RECEIVED_HTLC };
- RevokedHTLCOutput {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- per_commitment_key,
- weight,
- amount,
- htlc
- }
- }
-}
-
-impl_writeable!(RevokedHTLCOutput, 33*3 + 32 + 8 + 8 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- per_commitment_key,
- weight,
- amount,
- htlc
-});
-
-/// A struct to describe a HTLC output on a counterparty commitment transaction.
-///
-/// HTLCOutputInCommitment (hash, timelock, directon) and pubkeys are used to generate a suitable
-/// witnessScript.
-///
-/// The preimage is used as part of the witness.
-#[derive(Clone, PartialEq)]
-pub(crate) struct CounterpartyOfferedHTLCOutput {
- per_commitment_point: PublicKey,
- counterparty_delayed_payment_base_key: PublicKey,
- counterparty_htlc_base_key: PublicKey,
- preimage: PaymentPreimage,
- htlc: HTLCOutputInCommitment
-}
-
-impl CounterpartyOfferedHTLCOutput {
- pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment) -> Self {
- CounterpartyOfferedHTLCOutput {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- preimage,
- htlc
- }
- }
-}
-
-impl_writeable!(CounterpartyOfferedHTLCOutput, 33*3 + 32 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- preimage,
- htlc
-});
-
-/// A struct to describe a HTLC output on a counterparty commitment transaction.
-///
-/// HTLCOutputInCommitment (hash, timelock, directon) and pubkeys are used to generate a suitable
-/// witnessScript.
-#[derive(Clone, PartialEq)]
-pub(crate) struct CounterpartyReceivedHTLCOutput {
- per_commitment_point: PublicKey,
- counterparty_delayed_payment_base_key: PublicKey,
- counterparty_htlc_base_key: PublicKey,
- htlc: HTLCOutputInCommitment
-}
-
-impl CounterpartyReceivedHTLCOutput {
- pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, htlc: HTLCOutputInCommitment) -> Self {
- CounterpartyReceivedHTLCOutput {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- htlc
- }
- }
-}
-
-impl_writeable!(CounterpartyReceivedHTLCOutput, 33*3 + HTLC_OUTPUT_IN_COMMITMENT_SIZE, {
- per_commitment_point,
- counterparty_delayed_payment_base_key,
- counterparty_htlc_base_key,
- htlc
-});
-
-/// A struct to describe a HTLC output on holder commitment transaction.
-///
-/// Either offered or received, the amount is always used as part of the bip143 sighash.
-/// Preimage is only included as part of the witness in former case.
-#[derive(Clone, PartialEq)]
-pub(crate) struct HolderHTLCOutput {
- preimage: Option<PaymentPreimage>,
- amount: u64,
-}
-
-impl HolderHTLCOutput {
- pub(crate) fn build(preimage: Option<PaymentPreimage>, amount: u64) -> Self {
- HolderHTLCOutput {
- preimage,
- amount
- }
- }
-}
-
-impl_writeable!(HolderHTLCOutput, 0, {
- preimage,
- amount
-});
-
-/// A struct to describe the channel output on the funding transaction.
-///
-/// witnessScript is used as part of the witness redeeming the funding utxo.
-#[derive(Clone, PartialEq)]
-pub(crate) struct HolderFundingOutput {
- funding_redeemscript: Script,
-}
-
-impl HolderFundingOutput {
- pub(crate) fn build(funding_redeemscript: Script) -> Self {
- HolderFundingOutput {
- funding_redeemscript,
- }
- }
-}
-
-impl_writeable!(HolderFundingOutput, 0, {
- funding_redeemscript
-});
-
-/// A wrapper encapsulating all in-protocol differing outputs types.
-///
-/// The generic API offers access to an outputs common attributes or allow transformation such as
-/// finalizing an input claiming the output.
-#[derive(Clone, PartialEq)]
-pub(crate) enum PackageSolvingData {
- RevokedOutput(RevokedOutput),
- RevokedHTLCOutput(RevokedHTLCOutput),
- CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput),
- CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput),
- HolderHTLCOutput(HolderHTLCOutput),
- HolderFundingOutput(HolderFundingOutput),
-}
-
-impl PackageSolvingData {
- fn amount(&self) -> u64 {
- let amt = match self {
- PackageSolvingData::RevokedOutput(ref outp) => { outp.amount },
- PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.amount },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
- // Note: Currently, amounts of holder outputs spending witnesses aren't used
- // as we can't malleate spending package to increase their feerate. This
- // should change with the remaining anchor output patchset.
- PackageSolvingData::HolderHTLCOutput(..) => { 0 },
- PackageSolvingData::HolderFundingOutput(..) => { 0 },
- };
- amt
- }
- fn weight(&self) -> usize {
- let weight = match self {
- PackageSolvingData::RevokedOutput(ref outp) => { outp.weight as usize },
- PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.weight as usize },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { WEIGHT_OFFERED_HTLC as usize },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { WEIGHT_RECEIVED_HTLC as usize },
- // Note: Currently, weights of holder outputs spending witnesses aren't used
- // as we can't malleate spending package to increase their feerate. This
- // should change with the remaining anchor output patchset.
- PackageSolvingData::HolderHTLCOutput(..) => { debug_assert!(false); 0 },
- PackageSolvingData::HolderFundingOutput(..) => { debug_assert!(false); 0 },
- };
- weight
- }
- fn is_compatible(&self, input: &PackageSolvingData) -> bool {
- match self {
- PackageSolvingData::RevokedOutput(..) => {
- match input {
- PackageSolvingData::RevokedHTLCOutput(..) => { true },
- PackageSolvingData::RevokedOutput(..) => { true },
- _ => { false }
- }
- },
- PackageSolvingData::RevokedHTLCOutput(..) => {
- match input {
- PackageSolvingData::RevokedOutput(..) => { true },
- PackageSolvingData::RevokedHTLCOutput(..) => { true },
- _ => { false }
- }
- },
- _ => { mem::discriminant(self) == mem::discriminant(&input) }
- }
- }
- fn finalize_input<Signer: Sign>(&self, bumped_tx: &mut Transaction, i: usize, onchain_handler: &mut OnchainTxHandler<Signer>) -> bool {
- match self {
- PackageSolvingData::RevokedOutput(ref outp) => {
- if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
- let witness_script = chan_utils::get_revokeable_redeemscript(&chan_keys.revocation_key, outp.on_counterparty_tx_csv, &chan_keys.broadcaster_delayed_payment_key);
- //TODO: should we panic on signer failure ?
- if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_output(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &onchain_handler.secp_ctx) {
- bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
- bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
- bumped_tx.input[i].witness.push(vec!(1));
- bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
- } else { return false; }
- }
- },
- PackageSolvingData::RevokedHTLCOutput(ref outp) => {
- if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
- let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
- //TODO: should we panic on signer failure?
- if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_htlc(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &outp.htlc, &onchain_handler.secp_ctx) {
- bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
- bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
- bumped_tx.input[i].witness.push(chan_keys.revocation_key.clone().serialize().to_vec());
- bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
- } else { return false; }
- }
- },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => {
- if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
- let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
-
- if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
- bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
- bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
- bumped_tx.input[i].witness.push(outp.preimage.0.to_vec());
- bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
- }
- }
- },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => {
- if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
- let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
-
- bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transactions; if we ever do, lock_time must be set before signing to avoid breaking the sighash computation
- if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
- bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
- bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
- // Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
- bumped_tx.input[i].witness.push(vec![]);
- bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
- }
- }
- },
- _ => { panic!("API Error!"); }
- }
- true
- }
- fn get_finalized_tx<Signer: Sign>(&self, outpoint: &BitcoinOutPoint, onchain_handler: &mut OnchainTxHandler<Signer>) -> Option<Transaction> {
- match self {
- PackageSolvingData::HolderHTLCOutput(ref outp) => { return onchain_handler.get_fully_signed_htlc_tx(outpoint, &outp.preimage); }
- PackageSolvingData::HolderFundingOutput(ref outp) => { return Some(onchain_handler.get_fully_signed_holder_tx(&outp.funding_redeemscript)); }
- _ => { panic!("API Error!"); }
- }
- }
-}
-
-impl Writeable for PackageSolvingData {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match self {
- PackageSolvingData::RevokedOutput(ref revoked_outp) => {
- 0u8.write(writer)?;
- revoked_outp.write(writer)?;
- },
- PackageSolvingData::RevokedHTLCOutput(ref revoked_outp) => {
- 1u8.write(writer)?;
- revoked_outp.write(writer)?;
- },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(ref counterparty_outp) => {
- 2u8.write(writer)?;
- counterparty_outp.write(writer)?;
- },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(ref counterparty_outp) => {
- 3u8.write(writer)?;
- counterparty_outp.write(writer)?;
- },
- PackageSolvingData::HolderHTLCOutput(ref holder_outp) => {
- 4u8.write(writer)?;
- holder_outp.write(writer)?;
- },
- PackageSolvingData::HolderFundingOutput(ref funding_outp) => {
- 5u8.write(writer)?;
- funding_outp.write(writer)?;
- }
- }
- Ok(())
- }
-}
-
-impl Readable for PackageSolvingData {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let byte = <u8 as Readable>::read(reader)?;
- let solving_data = match byte {
- 0 => {
- PackageSolvingData::RevokedOutput(Readable::read(reader)?)
- },
- 1 => {
- PackageSolvingData::RevokedHTLCOutput(Readable::read(reader)?)
- },
- 2 => {
- PackageSolvingData::CounterpartyOfferedHTLCOutput(Readable::read(reader)?)
- },
- 3 => {
- PackageSolvingData::CounterpartyReceivedHTLCOutput(Readable::read(reader)?)
- },
- 4 => {
- PackageSolvingData::HolderHTLCOutput(Readable::read(reader)?)
- },
- 5 => {
- PackageSolvingData::HolderFundingOutput(Readable::read(reader)?)
- }
- _ => return Err(DecodeError::UnknownVersion)
- };
- Ok(solving_data)
- }
-}
-
-/// A malleable package might be aggregated with other packages to save on fees.
-/// An untractable package has been counter-signed: aggregating it would break the cached
-/// counterparty signatures.
-#[derive(Clone, PartialEq)]
-pub(crate) enum PackageMalleability {
- Malleable,
- Untractable,
-}
-
-/// A structure describing package content, generated by ChannelMonitor and used by
-/// OnchainTxHandler to generate and broadcast transactions settling onchain claims.
-///
-/// A package is defined as one or more transactions claiming onchain outputs in reaction
-/// to the confirmation of a channel transaction. Packages might be aggregated to save on
-/// fees, if satisfying the outputs' witnessScripts lets us do so.
-///
-/// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals.
-/// Failing to confirm a package translates into a loss of funds for the user.
-#[derive(Clone, PartialEq)]
-pub struct PackageTemplate {
- // List of onchain outputs and solving data to generate satisfying witnesses.
- inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>,
- // Packages are deemed malleable if we have local knowledge of at least one set of
- // private keys yielding satisfying witnesses. Malleability implies that we can aggregate
- // packages together to save on fees or rely on RBF to bump their feerates.
- // Untractable packages have been counter-signed and thus can't be aggregated without
- // breaking signatures. Their fee-bumping strategy must instead rely on CPFP.
- malleability: PackageMalleability,
- // Block height after which the earliest output belonging to this package matures for a
- // competing claim by the counterparty. As our chain tip nears this timelock, the
- // fee-bumping frequency increases. See `OnchainTxHandler::get_height_timer`.
- soonest_conf_deadline: u32,
- // Determines if this package can be aggregated.
- // Timelocked outputs belonging to the same transaction might have differing
- // satisfying heights. Picking the latest height among the output set would be a valid
- // aggregation strategy, but it comes with at least two trade-offs:
- // * funds from the earlier outputs take longer to come back to us
- // * the CLTV delta backing a corresponding HTLC on an upstream channel could be swallowed
- // by the timelock requirement of the later outputs in the set
- // For now, we mark such timelocked outputs as non-aggregable, though we might introduce
- // a smarter aggregation strategy in the future.
- aggregable: bool,
- // Cache of the package feerate committed at the previous (re)broadcast. As long as bumping
- // resources are available (either claimed output value or an external utxo), it keeps
- // increasing until either the holder or the counterparty claims successfully.
- feerate_previous: u64,
- // Cache of the next height at which fee-bumping and rebroadcast will be attempted. In
- // the future, we might derive this from observed mempool fluctuations instead.
- height_timer: Option<u32>,
- // Confirmation height of the transaction whose outputs this package claims. If a reorg
- // reaches back to it, we wipe out and forget the package.
- height_original: u32,
-}
-
-impl PackageTemplate {
- pub(crate) fn is_malleable(&self) -> bool {
- self.malleability == PackageMalleability::Malleable
- }
- pub(crate) fn timelock(&self) -> u32 {
- self.soonest_conf_deadline
- }
- pub(crate) fn aggregable(&self) -> bool {
- self.aggregable
- }
- pub(crate) fn feerate(&self) -> u64 {
- self.feerate_previous
- }
- pub(crate) fn set_feerate(&mut self, new_feerate: u64) {
- self.feerate_previous = new_feerate;
- }
- pub(crate) fn timer(&self) -> Option<u32> {
- if let Some(ref timer) = self.height_timer {
- return Some(*timer);
- }
- None
- }
- pub(crate) fn set_timer(&mut self, new_timer: Option<u32>) {
- self.height_timer = new_timer;
- }
- pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> {
- self.inputs.iter().map(|(o, _)| o).collect()
- }
- pub(crate) fn split_package(&mut self, split_outp: &BitcoinOutPoint) -> Option<PackageTemplate> {
- match self.malleability {
- PackageMalleability::Malleable => {
- let mut split_package = None;
- let timelock = self.soonest_conf_deadline;
- let aggregable = self.aggregable;
- let feerate_previous = self.feerate_previous;
- let height_timer = self.height_timer;
- let height_original = self.height_original;
- self.inputs.retain(|outp| {
- if *split_outp == outp.0 {
- split_package = Some(PackageTemplate {
- inputs: vec![(outp.0, outp.1.clone())],
- malleability: PackageMalleability::Malleable,
- soonest_conf_deadline: timelock,
- aggregable,
- feerate_previous,
- height_timer,
- height_original,
- });
- return false;
- }
- return true;
- });
- return split_package;
- },
- _ => {
- // Note: we may try to split on a remote transaction for
- // which we don't have a competing claim (HTLC-Success before
- // timelock expiration). This is why we don't panic here.
- // We should refactor OnchainTxHandler::block_connected to
- // only test equality on competing claims.
- return None;
- }
- }
- }
- pub(crate) fn merge_package(&mut self, mut merge_from: PackageTemplate) {
- assert_eq!(self.height_original, merge_from.height_original);
- if self.malleability == PackageMalleability::Untractable || merge_from.malleability == PackageMalleability::Untractable {
- panic!("Merging template on untractable packages");
- }
- if !self.aggregable || !merge_from.aggregable {
- panic!("Merging non aggregatable packages");
- }
- if let Some((_, lead_input)) = self.inputs.first() {
- for (_, v) in merge_from.inputs.iter() {
- if !lead_input.is_compatible(v) { panic!("Merging outputs from differing types !"); }
- }
- } else { panic!("Merging template on an empty package"); }
- for (k, v) in merge_from.inputs.drain(..) {
- self.inputs.push((k, v));
- }
- //TODO: verify coverage and sanity?
- if self.soonest_conf_deadline > merge_from.soonest_conf_deadline {
- self.soonest_conf_deadline = merge_from.soonest_conf_deadline;
- }
- if self.feerate_previous > merge_from.feerate_previous {
- self.feerate_previous = merge_from.feerate_previous;
- }
- self.height_timer = cmp::min(self.height_timer, merge_from.height_timer);
- }
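
A test-style sketch of the merge invariants enforced above, assuming crate-internal access (as in this module's unit tests); txid_a, txid_b and the helper dumb_revoked_output() are hypothetical stand-ins for dummy test data:

// Two malleable, aggregable packages built at the same height may be merged;
// the merged package keeps the soonest confirmation deadline.
let height = 100;
let mut package_a = PackageTemplate::build_package(txid_a, 0, dumb_revoked_output(), 1000, true, height);
let package_b = PackageTemplate::build_package(txid_b, 1, dumb_revoked_output(), 900, true, height);
package_a.merge_package(package_b);
assert_eq!(package_a.outpoints().len(), 2);
assert_eq!(package_a.timelock(), 900);
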
- pub(crate) fn package_amount(&self) -> u64 {
- let mut amounts = 0;
- for (_, outp) in self.inputs.iter() {
- amounts += outp.amount();
- }
- amounts
- }
- pub(crate) fn package_weight(&self, destination_script: &Script) -> usize {
- let mut inputs_weight = 0;
- let mut witnesses_weight = 2; // segwit marker + flag
- for (_, outp) in self.inputs.iter() {
- // previous_out_point: 36 bytes ; var_int: 1 byte ; sequence: 4 bytes
- inputs_weight += 41 * WITNESS_SCALE_FACTOR;
- witnesses_weight += outp.weight();
- }
- // version: 4 bytes ; count_tx_in: 1 byte ; count_tx_out: 1 byte ; lock_time: 4 bytes
- let transaction_weight = 10 * WITNESS_SCALE_FACTOR;
- // value: 8 bytes ; var_int: 1 byte ; pk_script: `destination_script.len()`
- let output_weight = (8 + 1 + destination_script.len()) * WITNESS_SCALE_FACTOR;
- inputs_weight + witnesses_weight + transaction_weight + output_weight
- }
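
A standalone restatement of the weight accounting above, worked for a single-input package paying to a 22-byte P2WPKH script; the 140-weight witness is a hypothetical value standing in for whatever PackageSolvingData::weight() reports:

fn predicted_package_weight(num_inputs: usize, witness_weight: usize, destination_script_len: usize) -> usize {
    const WITNESS_SCALE_FACTOR: usize = 4;
    // previous_out_point (36) + script_sig var_int (1) + sequence (4) per input, non-witness bytes
    let inputs_weight = num_inputs * 41 * WITNESS_SCALE_FACTOR;
    // segwit marker + flag count for 2 weight units, then the witnesses themselves
    let witnesses_weight = 2 + witness_weight;
    // version (4) + count_tx_in (1) + count_tx_out (1) + lock_time (4), non-witness bytes
    let transaction_weight = 10 * WITNESS_SCALE_FACTOR;
    // value (8) + pk_script var_int (1) + pk_script itself, non-witness bytes
    let output_weight = (8 + 1 + destination_script_len) * WITNESS_SCALE_FACTOR;
    inputs_weight + witnesses_weight + transaction_weight + output_weight
}

fn main() {
    // One input with a (hypothetical) 140-weight satisfying witness, paying to a
    // 22-byte P2WPKH script: 164 + (2 + 140) + 40 + 124 = 470 weight units.
    assert_eq!(predicted_package_weight(1, 140, 22), 470);
}
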
- pub(crate) fn finalize_package<L: Deref, Signer: Sign>(&self, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64, destination_script: Script, logger: &L) -> Option<Transaction>
- where L::Target: Logger,
- {
- match self.malleability {
- PackageMalleability::Malleable => {
- let mut bumped_tx = Transaction {
- version: 2,
- lock_time: 0,
- input: vec![],
- output: vec![TxOut {
- script_pubkey: destination_script,
- value,
- }],
- };
- for (outpoint, _) in self.inputs.iter() {
- bumped_tx.input.push(TxIn {
- previous_output: *outpoint,
- script_sig: Script::new(),
- sequence: 0xfffffffd,
- witness: Vec::new(),
- });
- }
- for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
- log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
- if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
- }
- log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
- return Some(bumped_tx);
- },
- PackageMalleability::Untractable => {
- if let Some((outpoint, outp)) = self.inputs.first() {
- if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
- log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
- log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
- return Some(final_tx);
- }
- return None;
- } else { panic!("API Error: Package must not be inputs empty"); }
- },
- }
- }
- pub(crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, aggregable: bool, height_original: u32) -> Self {
- let malleability = match input_solving_data {
- PackageSolvingData::RevokedOutput(..) => { PackageMalleability::Malleable },
- PackageSolvingData::RevokedHTLCOutput(..) => { PackageMalleability::Malleable },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { PackageMalleability::Malleable },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { PackageMalleability::Malleable },
- PackageSolvingData::HolderHTLCOutput(..) => { PackageMalleability::Untractable },
- PackageSolvingData::HolderFundingOutput(..) => { PackageMalleability::Untractable },
- };
- let mut inputs = Vec::with_capacity(1);
- inputs.push((BitcoinOutPoint { txid, vout }, input_solving_data));
- PackageTemplate {
- inputs,
- malleability,
- soonest_conf_deadline,
- aggregable,
- feerate_previous: 0,
- height_timer: None,
- height_original,
- }
- }
-}
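
A sketch of the variant-to-malleability mapping applied by build_package, again assuming crate-internal access; txid and the dumb_* helpers are hypothetical test stand-ins:

// Justice claims over revoked outputs stay malleable (aggregation/RBF possible),
// while the counter-signed holder commitment is Untractable.
let revoked = PackageTemplate::build_package(txid, 0, dumb_revoked_output(), 1000, true, 100);
assert!(revoked.is_malleable());
let commitment = PackageTemplate::build_package(txid, 0, dumb_holder_funding_output(), 1000, false, 100);
assert!(!commitment.is_malleable());
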
-
-impl Writeable for PackageTemplate {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- writer.write_all(&byte_utils::be64_to_array(self.inputs.len() as u64))?;
- for (ref outpoint, ref rev_outp) in self.inputs.iter() {
- outpoint.write(writer)?;
- rev_outp.write(writer)?;
- }
- self.soonest_conf_deadline.write(writer)?;
- self.feerate_previous.write(writer)?;
- self.height_timer.write(writer)?;
- self.height_original.write(writer)?;
- Ok(())
- }
-}
-
-impl Readable for PackageTemplate {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let inputs_count = <u64 as Readable>::read(reader)?;
- let mut inputs: Vec<(BitcoinOutPoint, PackageSolvingData)> = Vec::with_capacity(cmp::min(inputs_count as usize, MAX_ALLOC_SIZE / 128));
- for _ in 0..inputs_count {
- let outpoint = Readable::read(reader)?;
- let rev_outp = Readable::read(reader)?;
- inputs.push((outpoint, rev_outp));
- }
- let (malleability, aggregable) = if let Some((_, lead_input)) = inputs.first() {
- match lead_input {
- PackageSolvingData::RevokedOutput(..) => { (PackageMalleability::Malleable, true) },
- PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
- PackageSolvingData::HolderHTLCOutput(..) => { (PackageMalleability::Untractable, false) },
- PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
- }
- } else { return Err(DecodeError::InvalidValue); };
- let soonest_conf_deadline = Readable::read(reader)?;
- let feerate_previous = Readable::read(reader)?;
- let height_timer = Readable::read(reader)?;
- let height_original = Readable::read(reader)?;
- Ok(PackageTemplate {
- inputs,
- malleability,
- soonest_conf_deadline,
- aggregable,
- feerate_previous,
- height_timer,
- height_original,
- })
- }
-}
-
-/// Attempt to propose a bumping fee for a transaction from its spent outputs' values and predicted
-/// weight. We start with the highest-priority feerate returned by the node's fee estimator, then
-/// fall back to lower priorities until the fee is covered by the value available to claim.
-///
-/// If the proposed fee is less than the value of the spent outputs, we return the proposed
-/// fee and the corresponding updated feerate. If the proposed fee is equal to or more than the
-/// value of the spent outputs, we return nothing.
-fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: usize, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
- where F::Target: FeeEstimator,
- L::Target: Logger,
-{
- let mut updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64;
- let mut fee = updated_feerate * (predicted_weight as u64) / 1000;
- if input_amounts <= fee {
- updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal) as u64;
- fee = updated_feerate * (predicted_weight as u64) / 1000;
- if input_amounts <= fee {
- updated_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) as u64;
- fee = updated_feerate * (predicted_weight as u64) / 1000;
- if input_amounts <= fee {
- log_error!(logger, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
- fee, input_amounts);
- None
- } else {
- log_warn!(logger, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
- input_amounts);
- Some((fee, updated_feerate))
- }
- } else {
- log_warn!(logger, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
- input_amounts);
- Some((fee, updated_feerate))
- }
- } else {
- Some((fee, updated_feerate))
- }
-}
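
A standalone walkthrough of the priority fallback, with hypothetical estimator feerates (in sats per 1000 weight units) for the HighPriority, Normal and Background targets:

fn fee_from_spent_amounts(input_amounts: u64, predicted_weight: u64, feerates: [u64; 3]) -> Option<(u64, u64)> {
    // Try HighPriority, then Normal, then Background, keeping the first feerate
    // whose implied fee still leaves something to claim.
    for feerate in feerates {
        let fee = feerate * predicted_weight / 1000;
        if input_amounts > fee {
            return Some((fee, feerate));
        }
    }
    None
}

fn main() {
    // Claiming 1_000 sat with a 600-weight transaction: the 5_000 and 2_000
    // sat/kW estimates are unaffordable, so we settle for 1_000 sat/kW (600 sat fee).
    assert_eq!(fee_from_spent_amounts(1_000, 600, [5_000, 2_000, 1_000]), Some((600, 1_000)));
    // If even the Background estimate eats the whole claim, we give up.
    assert_eq!(fee_from_spent_amounts(100, 600, [5_000, 2_000, 1_000]), None);
}
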
-
-/// Attempt to propose a bumping fee for a transaction from its spent outputs' values and predicted
-/// weight. If the feerates proposed by the fee estimator have been increasing since the last
-/// fee-bumping attempt, use them. Otherwise, blindly bump the previous feerate by 25%. We also
-/// verify that these bumping heuristics respect BIP125 rules 3 and 4, and if required adjust
-/// the new fee to meet the RBF policy requirement.
-fn feerate_bump<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64, previous_feerate: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
- where F::Target: FeeEstimator,
- L::Target: Logger,
-{
- // If the old feerate is below the current one returned by the fee estimator, use the latter to compute the new fee...
- let new_fee = if let Some((new_fee, _)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
- let updated_feerate = new_fee * 1000 / (predicted_weight as u64);
- if updated_feerate > previous_feerate {
- new_fee
- } else {
- // ...else just increase the previous feerate by 25% (because that's a nice number)
- let new_fee = previous_feerate * (predicted_weight as u64) / 750;
- if input_amounts <= new_fee {
- log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
- return None;
- }
- new_fee
- }
- } else {
- log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
- return None;
- };
-
- let previous_fee = previous_feerate * (predicted_weight as u64) / 1000;
- let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * (predicted_weight as u64) / 1000;
- // BIP 125 Opt-in Full Replace-by-Fee Signaling
- // * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
- // * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
- let new_fee = if new_fee < previous_fee + min_relay_fee {
- previous_fee + min_relay_fee
- } else {
- new_fee
- };
- Some((new_fee, new_fee * 1000 / (predicted_weight as u64)))
-}
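
A standalone arithmetic sketch of the blind-bump path and the BIP125 floor, using a hypothetical minimum relay feerate of 253 sats per 1000 weight units:

fn blind_bump_with_rbf_floor(previous_feerate: u64, predicted_weight: u64, min_relay_feerate: u64) -> (u64, u64) {
    // Bump the previous fee as the code above does (divide feerate-times-weight by 750).
    let mut new_fee = previous_feerate * predicted_weight / 750;
    let previous_fee = previous_feerate * predicted_weight / 1000;
    let min_relay_fee = min_relay_feerate * predicted_weight / 1000;
    // BIP125 rules 3 and 4: pay at least the replaced fee plus the minimum relay fee.
    if new_fee < previous_fee + min_relay_fee {
        new_fee = previous_fee + min_relay_fee;
    }
    (new_fee, new_fee * 1000 / predicted_weight)
}

fn main() {
    // previous_fee = 300 sat, bumped fee = 400 sat, but the RBF floor is
    // 300 + 253 = 553 sat, so we pay 553 sat (553 sat/kW on 1000 weight).
    assert_eq!(blind_bump_with_rbf_floor(300, 1_000, 253), (553, 553));
}
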
-
-/// Deduce a new proposed fee from the claiming transaction's output value.
-/// If the new proposed fee is greater than the consumed outpoints' value, burn everything as
-/// miner fees to deter a counterparty attacker.
-pub(crate) fn compute_output_value<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64, previous_feerate: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)>
- where F::Target: FeeEstimator,
- L::Target: Logger,
-{
- // If the old feerate is 0, this is the first iteration of this claim; use the normal fee calculation
- if previous_feerate != 0 {
- if let Some((new_fee, feerate)) = feerate_bump(predicted_weight, input_amounts, previous_feerate, fee_estimator, logger) {
- // If the newly computed fee is greater than the whole claimable amount, burn it all as fees
- if new_fee > input_amounts {
- return Some((0, feerate));
- } else {
- return Some((input_amounts - new_fee, feerate));
- }
- }
- } else {
- if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
- return Some((input_amounts - new_fee, feerate));
- }
- }
- None
-}
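
A standalone illustration of the final output-value split, once one of the two helpers above has produced a fee:

fn claim_output_value(input_amounts: u64, new_fee: u64) -> u64 {
    // If the bumped fee exceeds what we are claiming, burn everything to fees
    // to deter the counterparty from trying to race us for the output.
    if new_fee > input_amounts { 0 } else { input_amounts - new_fee }
}

fn main() {
    assert_eq!(claim_output_value(10_000, 800), 9_200);
    assert_eq!(claim_output_value(500, 800), 0); // burn-everything case
}
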