use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;
-
+use bitcoin::hashes::Hash;
+#[cfg(anchors)]
+use bitcoin::hashes::HashEngine;
+#[cfg(anchors)]
+use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{Txid, BlockHash};
-
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
#[cfg(anchors)]
use crate::ln::chan_utils::{self, HTLCOutputInCommitment};
use crate::ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
+use crate::chain::ClaimId;
#[cfg(anchors)]
use crate::chain::chaininterface::ConfirmationTarget;
use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
use core::mem::replace;
#[cfg(anchors)]
use core::mem::swap;
-use bitcoin::hashes::Hash;
const MAX_ALLOC_SIZE: usize = 64*1024;
/// transaction has met [`ANTI_REORG_DELAY`] confirmations, we consider it final and remove the
/// pending request.
Claim {
- package_id: PackageID,
+ claim_id: ClaimId,
},
/// The counterparty has claimed an outpoint from one of our pending requests through a
/// different transaction than ours. If our transaction was attempting to claim multiple
impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
(0, Claim) => {
- (0, package_id, required),
+ (0, claim_id, required),
},
(1, ContentiousOutpoint) => {
(0, package, required),
Event(ClaimEvent),
}
-/// An internal identifier to track pending package claims within the `OnchainTxHandler`.
-type PackageID = [u8; 32];
-
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
/// do RBF bumping if possible.
pub struct OnchainTxHandler<ChannelSigner: WriteableEcdsaChannelSigner> {
// us and is immutable until all outpoint of the claimable set are post-anti-reorg-delay solved.
// Entry is cache of elements need to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
#[cfg(test)] // Used in functional_test to verify sanitization
- pub(crate) pending_claim_requests: HashMap<PackageID, PackageTemplate>,
+ pub(crate) pending_claim_requests: HashMap<ClaimId, PackageTemplate>,
#[cfg(not(test))]
- pending_claim_requests: HashMap<PackageID, PackageTemplate>,
+ pending_claim_requests: HashMap<ClaimId, PackageTemplate>,
// Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec`
// essentially acts as an insertion-ordered `HashMap` – there should only ever be one occurrence
- // of a `PackageID`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and
+ // of a `ClaimId`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and
// a new block has been connected, resulting in a new claim, the previous will be replaced with
// the new.
//
// - A block being connected/disconnected
// - Learning the preimage for an HTLC we can claim onchain
#[cfg(anchors)]
- pending_claim_events: Vec<(PackageID, ClaimEvent)>,
+ pending_claim_events: Vec<(ClaimId, ClaimEvent)>,
// Used to link outpoints claimed in a connected block to a pending claim request. The keys
// represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to
// [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if
// the block gets disconnected.
#[cfg(test)] // Used in functional_test to verify sanitization
- pub claimable_outpoints: HashMap<BitcoinOutPoint, (PackageID, u32)>,
+ pub claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
#[cfg(not(test))]
- claimable_outpoints: HashMap<BitcoinOutPoint, (PackageID, u32)>,
+ claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
}
#[cfg(anchors)]
-	pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
+	/// Drains all pending claim events, returning each alongside the `ClaimId` of the claim it
+	/// belongs to so the caller can correlate events with their originating claim requests.
+	pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<(ClaimId, ClaimEvent)> {
		let mut events = Vec::new();
		swap(&mut events, &mut self.pending_claim_events);
-		events.into_iter().map(|(_, event)| event).collect()
+		events
	}
/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
L::Target: Logger,
{
let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
- for (package_id, request) in self.pending_claim_requests.iter() {
+ for (claim_id, request) in self.pending_claim_requests.iter() {
let inputs = request.outpoints();
log_info!(logger, "Triggering rebroadcast/fee-bump for request with inputs {:?}", inputs);
- bump_requests.push((*package_id, request.clone()));
+ bump_requests.push((*claim_id, request.clone()));
}
- for (package_id, request) in bump_requests {
+ for (claim_id, request) in bump_requests {
self.generate_claim(current_height, &request, false /* force_feerate_bump */, fee_estimator, logger)
.map(|(_, new_feerate, claim)| {
let mut bumped_feerate = false;
- if let Some(mut_request) = self.pending_claim_requests.get_mut(&package_id) {
+ if let Some(mut_request) = self.pending_claim_requests.get_mut(&claim_id) {
bumped_feerate = request.previous_feerate() > new_feerate;
mut_request.set_feerate(new_feerate);
}
#[cfg(debug_assertions)] {
debug_assert!(request.requires_external_funding());
let num_existing = self.pending_claim_events.iter()
- .filter(|entry| entry.0 == package_id).count();
+ .filter(|entry| entry.0 == claim_id).count();
assert!(num_existing == 0 || num_existing == 1);
}
- self.pending_claim_events.retain(|event| event.0 != package_id);
- self.pending_claim_events.push((package_id, event));
+ self.pending_claim_events.retain(|event| event.0 != claim_id);
+ self.pending_claim_events.push((claim_id, event));
}
}
});
// transaction is reorged out.
let mut all_inputs_have_confirmed_spend = true;
for outpoint in request_outpoints.iter() {
- if let Some((request_package_id, _)) = self.claimable_outpoints.get(*outpoint) {
+ if let Some((request_claim_id, _)) = self.claimable_outpoints.get(*outpoint) {
// We check for outpoint spends within claims individually rather than as a set
// since requests can have outpoints split off.
if !self.onchain_events_awaiting_threshold_conf.iter()
- .any(|event_entry| if let OnchainEvent::Claim { package_id } = event_entry.event {
- *request_package_id == package_id
+ .any(|event_entry| if let OnchainEvent::Claim { claim_id } = event_entry.event {
+ *request_claim_id == claim_id
} else {
// The onchain event is not a claim, keep seeking until we find one.
false
) {
req.set_timer(new_timer);
req.set_feerate(new_feerate);
- let package_id = match claim {
+ let claim_id = match claim {
OnchainClaim::Tx(tx) => {
log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
broadcaster.broadcast_transactions(&[&tx]);
- tx.txid().into_inner()
+ ClaimId(tx.txid().into_inner())
},
#[cfg(anchors)]
OnchainClaim::Event(claim_event) => {
log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints());
- let package_id = match claim_event {
- ClaimEvent::BumpCommitment { ref commitment_tx, .. } => commitment_tx.txid().into_inner(),
+ let claim_id = match claim_event {
+ ClaimEvent::BumpCommitment { ref commitment_tx, .. } =>
+ // For commitment claims, we can just use their txid as it should
+ // already be unique.
+ ClaimId(commitment_tx.txid().into_inner()),
ClaimEvent::BumpHTLC { ref htlcs, .. } => {
- // Use the same construction as a lightning channel id to generate
- // the package id for this request based on the first HTLC. It
- // doesn't matter what we use as long as it's unique per request.
- let mut package_id = [0; 32];
- package_id[..].copy_from_slice(&htlcs[0].commitment_txid[..]);
- let htlc_output_index = htlcs[0].htlc.transaction_output_index.unwrap();
- package_id[30] ^= ((htlc_output_index >> 8) & 0xff) as u8;
- package_id[31] ^= ((htlc_output_index >> 0) & 0xff) as u8;
- package_id
+ // For HTLC claims, commit to the entire set of HTLC outputs to
+ // claim, which will always be unique per request. Once a claim ID
+ // is generated, it is assigned and remains unchanged, even if the
+ // underlying set of HTLCs changes.
+ let mut engine = Sha256::engine();
+ for htlc in htlcs {
+ engine.input(&htlc.commitment_txid.into_inner());
+ engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes());
+ }
+ ClaimId(Sha256::from_engine(engine).into_inner())
},
};
- debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == package_id).count(), 0);
- self.pending_claim_events.push((package_id, claim_event));
- package_id
+ debug_assert!(self.pending_claim_requests.get(&claim_id).is_none());
+ debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == claim_id).count(), 0);
+ self.pending_claim_events.push((claim_id, claim_event));
+ claim_id
},
};
+ debug_assert!(self.pending_claim_requests.get(&claim_id).is_none());
for k in req.outpoints() {
log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
- self.claimable_outpoints.insert(k.clone(), (package_id, conf_height));
+ self.claimable_outpoints.insert(k.clone(), (claim_id, conf_height));
}
- self.pending_claim_requests.insert(package_id, req);
+ self.pending_claim_requests.insert(claim_id, req);
}
}
}
// Scan all input to verify is one of the outpoint spent is of interest for us
let mut claimed_outputs_material = Vec::new();
for inp in &tx.input {
- if let Some((package_id, _)) = self.claimable_outpoints.get(&inp.previous_output) {
+ if let Some((claim_id, _)) = self.claimable_outpoints.get(&inp.previous_output) {
// If outpoint has claim request pending on it...
- if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
+ if let Some(request) = self.pending_claim_requests.get_mut(claim_id) {
//... we need to verify equality between transaction outpoints and claim request
// outpoints to know if transaction is the original claim or a bumped one issued
// by us.
txid: tx.txid(),
height: conf_height,
block_hash: Some(conf_hash),
- event: OnchainEvent::Claim { package_id: *package_id }
+ event: OnchainEvent::Claim { claim_id: *claim_id }
};
if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
self.onchain_events_awaiting_threshold_conf.push(entry);
}
//TODO: recompute soonest_timelock to avoid wasting a bit on fees
if at_least_one_drop {
- bump_candidates.insert(*package_id, request.clone());
+ bump_candidates.insert(*claim_id, request.clone());
// If we have any pending claim events for the request being updated
// that have yet to be consumed, we'll remove them since they will
// end up producing an invalid transaction by double spending
#[cfg(anchors)] {
#[cfg(debug_assertions)] {
let existing = self.pending_claim_events.iter()
- .filter(|entry| entry.0 == *package_id).count();
+ .filter(|entry| entry.0 == *claim_id).count();
assert!(existing == 0 || existing == 1);
}
- self.pending_claim_events.retain(|entry| entry.0 != *package_id);
+ self.pending_claim_events.retain(|entry| entry.0 != *claim_id);
}
}
}
for entry in onchain_events_awaiting_threshold_conf {
if entry.has_reached_confirmation_threshold(cur_height) {
match entry.event {
- OnchainEvent::Claim { package_id } => {
+ OnchainEvent::Claim { claim_id } => {
// We may remove a whole set of claim outpoints here, as these one may have
// been aggregated in a single tx and claimed so atomically
- if let Some(request) = self.pending_claim_requests.remove(&package_id) {
+ if let Some(request) = self.pending_claim_requests.remove(&claim_id) {
for outpoint in request.outpoints() {
log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.",
- outpoint, log_bytes!(package_id));
+ outpoint, log_bytes!(claim_id.0));
self.claimable_outpoints.remove(outpoint);
}
#[cfg(anchors)] {
#[cfg(debug_assertions)] {
let num_existing = self.pending_claim_events.iter()
- .filter(|entry| entry.0 == package_id).count();
+ .filter(|entry| entry.0 == claim_id).count();
assert!(num_existing == 0 || num_existing == 1);
}
- self.pending_claim_events.retain(|(id, _)| *id != package_id);
+ self.pending_claim_events.retain(|(id, _)| *id != claim_id);
}
}
},
}
// Check if any pending claim request must be rescheduled
- for (package_id, request) in self.pending_claim_requests.iter() {
+ for (claim_id, request) in self.pending_claim_requests.iter() {
if cur_height >= request.timer() {
- bump_candidates.insert(*package_id, request.clone());
+ bump_candidates.insert(*claim_id, request.clone());
}
}
// Build, bump and rebroadcast tx accordingly
log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
- for (package_id, request) in bump_candidates.iter() {
+ for (claim_id, request) in bump_candidates.iter() {
if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
cur_height, &request, true /* force_feerate_bump */, &*fee_estimator, &*logger,
) {
log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
#[cfg(debug_assertions)] {
let num_existing = self.pending_claim_events.iter().
- filter(|entry| entry.0 == *package_id).count();
+ filter(|entry| entry.0 == *claim_id).count();
assert!(num_existing == 0 || num_existing == 1);
}
- self.pending_claim_events.retain(|event| event.0 != *package_id);
- self.pending_claim_events.push((*package_id, claim_event));
+ self.pending_claim_events.retain(|event| event.0 != *claim_id);
+ self.pending_claim_events.push((*claim_id, claim_event));
},
}
- if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
+ if let Some(request) = self.pending_claim_requests.get_mut(claim_id) {
request.set_timer(new_timer);
request.set_feerate(new_feerate);
}
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
- for ((_package_id, _), ref mut request) in bump_candidates.iter_mut() {
+ for ((_claim_id, _), ref mut request) in bump_candidates.iter_mut() {
// `height` is the height being disconnected, so our `current_height` is 1 lower.
let current_height = height - 1;
if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
#[cfg(debug_assertions)] {
let num_existing = self.pending_claim_events.iter()
- .filter(|entry| entry.0 == *_package_id).count();
+ .filter(|entry| entry.0 == *_claim_id).count();
assert!(num_existing == 0 || num_existing == 1);
}
- self.pending_claim_events.retain(|event| event.0 != *_package_id);
- self.pending_claim_events.push((*_package_id, claim_event));
+ self.pending_claim_events.retain(|event| event.0 != *_claim_id);
+ self.pending_claim_events.push((*_claim_id, claim_event));
},
}
}
//! Utilities for bumping transactions originating from [`super::Event`]s.
+use core::convert::TryInto;
+use core::ops::Deref;
+
+use crate::chain::chaininterface::BroadcasterInterface;
+use crate::chain::ClaimId;
+use crate::sign::{ChannelSigner, EcdsaChannelSigner, SignerProvider};
+use crate::io_extras::sink;
use crate::ln::PaymentPreimage;
use crate::ln::chan_utils;
-use crate::ln::chan_utils::{ChannelTransactionParameters, HTLCOutputInCommitment};
+use crate::ln::chan_utils::{
+ ANCHOR_INPUT_WITNESS_WEIGHT, HTLC_SUCCESS_INPUT_ANCHOR_WITNESS_WEIGHT,
+ HTLC_TIMEOUT_INPUT_ANCHOR_WITNESS_WEIGHT, ChannelTransactionParameters, HTLCOutputInCommitment
+};
+use crate::events::Event;
+use crate::prelude::HashMap;
+use crate::sync::Mutex;
+use crate::util::logger::Logger;
-use bitcoin::{OutPoint, PackedLockTime, Script, Transaction, Txid, TxIn, TxOut, Witness};
+use bitcoin::{OutPoint, PackedLockTime, PubkeyHash, Sequence, Script, Transaction, Txid, TxIn, TxOut, Witness, WPubkeyHash};
+use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
+use bitcoin::consensus::Encodable;
use bitcoin::secp256k1;
use bitcoin::secp256k1::{PublicKey, Secp256k1};
use bitcoin::secp256k1::ecdsa::Signature;
+// Weight units consumed by an empty `script_sig` (its 1-byte length prefix); non-witness
+// data, hence scaled by `WITNESS_SCALE_FACTOR`.
+const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64;
+
+// Serialized size of a `TxIn`'s fixed fields, excluding its `script_sig` and witness.
+const BASE_INPUT_SIZE: u64 = 32 /* txid */ + 4 /* vout */ + 4 /* sequence */;
+
+// Weight of a `TxIn`'s fixed fields (non-witness data, hence the scale factor).
+const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64;
+
+// TODO: Define typed abstraction over feerates to handle their conversions.
+// Converts an absolute fee and a weight into sats-per-1000-weight, saturating at `u32::MAX`.
+// NOTE(review): divides by `weight` — panics if callers ever pass a zero weight.
+fn compute_feerate_sat_per_1000_weight(fee_sat: u64, weight: u64) -> u32 {
+	(fee_sat * 1000 / weight).try_into().unwrap_or(u32::max_value())
+}
+// Returns the fee in satoshis required for `weight` at the given feerate, rounding up so the
+// resulting transaction never undershoots the target feerate.
+const fn fee_for_weight(feerate_sat_per_1000_weight: u32, weight: u64) -> u64 {
+	((feerate_sat_per_1000_weight as u64 * weight) + 1000 - 1) / 1000
+}
+
/// A descriptor used to sign for a commitment transaction's anchor output.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AnchorDescriptor {
/// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::sign::EcdsaChannelSigner::sign_holder_anchor_input
/// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness
ChannelClose {
+ /// The unique identifier for the claim of the anchor output in the commitment transaction.
+ ///
+ /// The identifier must map to the set of external UTXOs assigned to the claim, such that
+ /// they can be reused when a new claim with the same identifier needs to be made, resulting
+ /// in a fee-bumping attempt.
+ claim_id: ClaimId,
/// The target feerate that the transaction package, which consists of the commitment
/// transaction and the to-be-crafted child anchor transaction, must meet.
package_target_feerate_sat_per_1000_weight: u32,
/// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::sign::EcdsaChannelSigner::sign_holder_htlc_transaction
/// [`HTLCDescriptor::tx_input_witness`]: HTLCDescriptor::tx_input_witness
HTLCResolution {
+ /// The unique identifier for the claim of the HTLCs in the confirmed commitment
+ /// transaction.
+ ///
+ /// The identifier must map to the set of external UTXOs assigned to the claim, such that
+ /// they can be reused when a new claim with the same identifier needs to be made, resulting
+ /// in a fee-bumping attempt.
+ claim_id: ClaimId,
/// The target feerate that the resulting HTLC transaction must meet.
target_feerate_sat_per_1000_weight: u32,
/// The set of pending HTLCs on the confirmed commitment that need to be claimed, preferably
tx_lock_time: PackedLockTime,
},
}
+
+/// An input that must be included in a transaction when performing coin selection through
+/// [`CoinSelectionSource::select_confirmed_utxos`]. It is guaranteed to be a SegWit input, so it
+/// must have an empty [`TxIn::script_sig`] when spent.
+pub struct Input {
+	/// The unique identifier of the input.
+	pub outpoint: OutPoint,
+	/// The upper-bound weight consumed by the input's full [`TxIn::script_sig`] and
+	/// [`TxIn::witness`], each with their lengths included, required to satisfy the output's
+	/// script.
+	// NOTE(review): unlike `Utxo`, no constructor helpers compute this bound — callers supply it.
+	pub satisfaction_weight: u64,
+}
+
+/// An unspent transaction output that is available to spend resulting from a successful
+/// [`CoinSelection`] attempt.
+#[derive(Clone, Debug)]
+pub struct Utxo {
+	/// The unique identifier of the output.
+	pub outpoint: OutPoint,
+	/// The output to spend.
+	pub output: TxOut,
+	/// The upper-bound weight consumed by the input's full [`TxIn::script_sig`] and [`TxIn::witness`], each
+	/// with their lengths included, required to satisfy the output's script. The weight consumed by
+	/// the input's `script_sig` must account for [`WITNESS_SCALE_FACTOR`].
+	pub satisfaction_weight: u64,
+}
+
+impl Utxo {
+	// Witness weight of a standard P2WPKH spend: the stack item count, plus a signature and a
+	// compressed pubkey, each with its length prefix. Witness bytes count 1 weight unit each.
+	const P2WPKH_WITNESS_WEIGHT: u64 = 1 /* num stack items */ +
+		1 /* sig length */ +
+		73 /* sig including sighash flag */ +
+		1 /* pubkey length */ +
+		33 /* pubkey */;
+
+	/// Returns a `Utxo` with the `satisfaction_weight` estimate for a legacy P2PKH output.
+	pub fn new_p2pkh(outpoint: OutPoint, value: u64, pubkey_hash: &PubkeyHash) -> Self {
+		// A P2PKH spend is satisfied entirely in the script_sig: a pushed signature and a
+		// pushed pubkey, plus the script_sig's own length prefix.
+		let script_sig_size = 1 /* script_sig length */ +
+			1 /* OP_PUSH73 */ +
+			73 /* sig including sighash flag */ +
+			1 /* OP_PUSH33 */ +
+			33 /* pubkey */;
+		Self {
+			outpoint,
+			output: TxOut {
+				value,
+				script_pubkey: Script::new_p2pkh(pubkey_hash),
+			},
+			// script_sig bytes are non-witness data (scaled); the extra weight unit covers the
+			// empty witness item count in a segwit-serialized transaction.
+			satisfaction_weight: script_sig_size * WITNESS_SCALE_FACTOR as u64 + 1 /* empty witness */,
+		}
+	}
+
+	/// Returns a `Utxo` with the `satisfaction_weight` estimate for a P2WPKH nested in P2SH output.
+	pub fn new_nested_p2wpkh(outpoint: OutPoint, value: u64, pubkey_hash: &WPubkeyHash) -> Self {
+		// The script_sig only pushes the serialized v0 witness program; the signature data
+		// itself lives in the witness, identical to a native P2WPKH spend.
+		let script_sig_size = 1 /* script_sig length */ +
+			1 /* OP_0 */ +
+			1 /* OP_PUSH20 */ +
+			20 /* pubkey_hash */;
+		Self {
+			outpoint,
+			output: TxOut {
+				value,
+				script_pubkey: Script::new_p2sh(&Script::new_v0_p2wpkh(pubkey_hash).script_hash()),
+			},
+			satisfaction_weight: script_sig_size * WITNESS_SCALE_FACTOR as u64 + Self::P2WPKH_WITNESS_WEIGHT,
+		}
+	}
+
+	/// Returns a `Utxo` with the `satisfaction_weight` estimate for a SegWit v0 P2WPKH output.
+	pub fn new_v0_p2wpkh(outpoint: OutPoint, value: u64, pubkey_hash: &WPubkeyHash) -> Self {
+		// Native SegWit spend: empty script_sig, all satisfaction data in the witness.
+		Self {
+			outpoint,
+			output: TxOut {
+				value,
+				script_pubkey: Script::new_v0_p2wpkh(pubkey_hash),
+			},
+			satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + Self::P2WPKH_WITNESS_WEIGHT,
+		}
+	}
+}
+
+/// The result of a successful coin selection attempt for a transaction requiring additional UTXOs
+/// to cover its fees.
+pub struct CoinSelection {
+	/// The set of UTXOs (with at least 1 confirmation) to spend and use within a transaction
+	/// requiring additional fees.
+	confirmed_utxos: Vec<Utxo>,
+	/// An additional output tracking whether any change remained after coin selection. This output
+	/// should always have a value above dust for its given `script_pubkey`. It should not be
+	/// spent until the transaction it belongs to confirms to ensure mempool descendant limits are
+	/// not exceeded. This implies no other party should be able to spend it except us.
+	change_output: Option<TxOut>,
+}
+
+/// An abstraction over a bitcoin wallet that can perform coin selection over a set of UTXOs and can
+/// sign for them. The coin selection method aims to mimic Bitcoin Core's `fundrawtransaction` RPC,
+/// which most wallets should be able to satisfy. Otherwise, consider implementing [`WalletSource`],
+/// which can provide a default implementation of this trait when used with [`Wallet`].
+pub trait CoinSelectionSource {
+	/// Performs coin selection of a set of UTXOs, with at least 1 confirmation each, that are
+	/// available to spend. Implementations are free to pick their coin selection algorithm of
+	/// choice, as long as the following requirements are met:
+	///
+	/// 1. `must_spend` contains a set of [`Input`]s that must be included in the transaction
+	///    throughout coin selection, but must not be returned as part of the result.
+	/// 2. `must_pay_to` contains a set of [`TxOut`]s that must be included in the transaction
+	///    throughout coin selection. In some cases, like when funding an anchor transaction, this
+	///    set is empty. Implementations should ensure they handle this correctly on their end,
+	///    e.g., Bitcoin Core's `fundrawtransaction` RPC requires at least one output to be
+	///    provided, in which case a zero-value empty OP_RETURN output can be used instead.
+	/// 3. Enough inputs must be selected/contributed for the resulting transaction (including the
+	///    inputs and outputs noted above) to meet `target_feerate_sat_per_1000_weight`.
+	///
+	/// Implementations must take note that [`Input::satisfaction_weight`] only tracks the weight of
+	/// the input's `script_sig` and `witness`. Some wallets, like Bitcoin Core's, may require
+	/// providing the full input weight. Failing to do so may lead to underestimating fee bumps and
+	/// delaying block inclusion.
+	///
+	/// The `claim_id` must map to the set of external UTXOs assigned to the claim, such that they
+	/// can be re-used within new fee-bumped iterations of the original claiming transaction,
+	/// ensuring that claims don't double spend each other. If a specific `claim_id` has never had a
+	/// transaction associated with it, and all of the available UTXOs have already been assigned to
+	/// other claims, implementations must be willing to double spend their UTXOs. The choice of
+	/// which UTXOs to double spend is left to the implementation, but it must strive to keep the
+	/// set of other claims being double spent to a minimum.
+	///
+	/// Returns `Err(())` when no set of UTXOs satisfying the above requirements can be produced.
+	fn select_confirmed_utxos(
+		&self, claim_id: ClaimId, must_spend: &[Input], must_pay_to: &[TxOut],
+		target_feerate_sat_per_1000_weight: u32,
+	) -> Result<CoinSelection, ()>;
+	/// Signs and provides the full witness for all inputs within the transaction known to the
+	/// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]).
+	fn sign_tx(&self, tx: &mut Transaction) -> Result<(), ()>;
+}
+
+/// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to
+/// provide a default implementation to [`CoinSelectionSource`].
+pub trait WalletSource {
+	/// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. Each
+	/// entry's [`Utxo::satisfaction_weight`] is relied upon for fee estimation during coin
+	/// selection, so it must be populated accurately.
+	fn list_confirmed_utxos(&self) -> Result<Vec<Utxo>, ()>;
+	/// Returns a script to use for change above dust resulting from a successful coin selection
+	/// attempt.
+	fn get_change_script(&self) -> Result<Script, ()>;
+	/// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within
+	/// the transaction known to the wallet (i.e., any provided via
+	/// [`WalletSource::list_confirmed_utxos`]).
+	fn sign_tx(&self, tx: &mut Transaction) -> Result<(), ()>;
+}
+
+/// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs that
+/// would avoid conflicting double spends. If not enough UTXOs are available to do so, conflicting
+/// double spends may happen.
+pub struct Wallet<W: Deref> where W::Target: WalletSource {
+	// The underlying wallet providing UTXOs, change scripts, and signatures.
+	source: W,
+	// Maps each reserved UTXO to the claim it was last selected for, letting retries of the same
+	// claim reuse its UTXOs while other claims avoid conflicting spends.
+	// TODO: Do we care about cleaning this up once the UTXOs have a confirmed spend? We can do so
+	// by checking whether any UTXOs that exist in the map are no longer returned in
+	// `list_confirmed_utxos`.
+	locked_utxos: Mutex<HashMap<OutPoint, ClaimId>>,
+}
+
+impl<W: Deref> Wallet<W> where W::Target: WalletSource {
+	/// Returns a new instance backed by the given [`WalletSource`] that serves as an implementation
+	/// of [`CoinSelectionSource`].
+	pub fn new(source: W) -> Self {
+		Self { source, locked_utxos: Mutex::new(HashMap::new()) }
+	}
+
+	/// Performs coin selection on the set of UTXOs obtained from
+	/// [`WalletSource::list_confirmed_utxos`]. Its algorithm can be described as "smallest
+	/// above-dust-after-spend first", with a slight twist: we may skip UTXOs that are above dust at
+	/// the target feerate after having spent them in a separate claim transaction if
+	/// `force_conflicting_utxo_spend` is unset to avoid producing conflicting transactions. If
+	/// `tolerate_high_network_feerates` is set, we'll attempt to spend UTXOs that contribute at
+	/// least 1 satoshi at the current feerate, otherwise, we'll only attempt to spend those which
+	/// contribute at least twice their fee.
+	fn select_confirmed_utxos_internal(
+		&self, utxos: &[Utxo], claim_id: ClaimId, force_conflicting_utxo_spend: bool,
+		tolerate_high_network_feerates: bool, target_feerate_sat_per_1000_weight: u32,
+		preexisting_tx_weight: u64, target_amount_sat: u64,
+	) -> Result<CoinSelection, ()> {
+		let mut locked_utxos = self.locked_utxos.lock().unwrap();
+		let mut eligible_utxos = utxos.iter().filter_map(|utxo| {
+			// Skip UTXOs already reserved for a *different* claim unless the caller explicitly
+			// allows conflicting (double) spends.
+			if let Some(utxo_claim_id) = locked_utxos.get(&utxo.outpoint) {
+				if *utxo_claim_id != claim_id && !force_conflicting_utxo_spend {
+					return None;
+				}
+			}
+			// NOTE(review): the `as u64` cast is redundant — `BASE_INPUT_WEIGHT` is already `u64`.
+			let fee_to_spend_utxo = fee_for_weight(
+				target_feerate_sat_per_1000_weight, BASE_INPUT_WEIGHT as u64 + utxo.satisfaction_weight,
+			);
+			// Only spend a UTXO if it contributes meaningfully beyond its own spend cost (the
+			// two thresholds are described in the method docs above).
+			let should_spend = if tolerate_high_network_feerates {
+				utxo.output.value > fee_to_spend_utxo
+			} else {
+				utxo.output.value >= fee_to_spend_utxo * 2
+			};
+			if should_spend {
+				Some((utxo, fee_to_spend_utxo))
+			} else {
+				None
+			}
+		}).collect::<Vec<_>>();
+		// Smallest-value first, implementing "smallest above-dust-after-spend first".
+		eligible_utxos.sort_unstable_by_key(|(utxo, _)| utxo.output.value);
+
+		let mut selected_amount = 0;
+		let mut total_fees = fee_for_weight(target_feerate_sat_per_1000_weight, preexisting_tx_weight);
+		let mut selected_utxos = Vec::new();
+		for (utxo, fee_to_spend_utxo) in eligible_utxos {
+			if selected_amount >= target_amount_sat + total_fees {
+				break;
+			}
+			selected_amount += utxo.output.value;
+			total_fees += fee_to_spend_utxo;
+			selected_utxos.push(utxo.clone());
+		}
+		// Insufficient funds even after considering every eligible UTXO.
+		if selected_amount < target_amount_sat + total_fees {
+			return Err(());
+		}
+		// Reserve the chosen UTXOs for this claim before releasing the lock.
+		// NOTE(review): if `get_change_script` fails below, the reservation is still kept —
+		// confirm this is intended.
+		for utxo in &selected_utxos {
+			locked_utxos.insert(utxo.outpoint, claim_id);
+		}
+		core::mem::drop(locked_utxos);
+
+		let remaining_amount = selected_amount - target_amount_sat - total_fees;
+		let change_script = self.source.get_change_script()?;
+		// The change output's value and serialized script are non-witness data, hence scaled by
+		// `WITNESS_SCALE_FACTOR`; `consensus_encode` into a sink yields the serialized length.
+		let change_output_fee = fee_for_weight(
+			target_feerate_sat_per_1000_weight,
+			(8 /* value */ + change_script.consensus_encode(&mut sink()).unwrap() as u64) *
+				WITNESS_SCALE_FACTOR as u64,
+		);
+		let change_output_amount = remaining_amount.saturating_sub(change_output_fee);
+		// Drop the change entirely if, after paying for its own weight, it would be below dust.
+		let change_output = if change_output_amount < change_script.dust_value().to_sat() {
+			None
+		} else {
+			Some(TxOut { script_pubkey: change_script, value: change_output_amount })
+		};
+
+		Ok(CoinSelection {
+			confirmed_utxos: selected_utxos,
+			change_output,
+		})
+	}
+}
+
+impl<W: Deref> CoinSelectionSource for Wallet<W> where W::Target: WalletSource {
+	fn select_confirmed_utxos(
+		&self, claim_id: ClaimId, must_spend: &[Input], must_pay_to: &[TxOut],
+		target_feerate_sat_per_1000_weight: u32,
+	) -> Result<CoinSelection, ()> {
+		let utxos = self.source.list_confirmed_utxos()?;
+		// TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0.
+		const BASE_TX_SIZE: u64 = 4 /* version */ + 1 /* input count */ + 1 /* output count */ + 4 /* locktime */;
+		// NOTE(review): assumes a 1-byte script-length varint, which only holds for scripts
+		// under 253 bytes — fine for standard outputs, but worth confirming.
+		let total_output_size: u64 = must_pay_to.iter().map(|output|
+			8 /* value */ + 1 /* script len */ + output.script_pubkey.len() as u64
+		).sum();
+		let total_satisfaction_weight: u64 = must_spend.iter().map(|input| input.satisfaction_weight).sum();
+		let total_input_weight = (BASE_INPUT_WEIGHT * must_spend.len() as u64) + total_satisfaction_weight;
+
+		// Weight of the transaction before any contributed UTXOs/change: non-witness fields
+		// scaled by `WITNESS_SCALE_FACTOR`, the SegWit marker & flag, and the must-spend
+		// inputs' satisfaction weight.
+		let preexisting_tx_weight = 2 /* segwit marker & flag */ + total_input_weight +
+			((BASE_TX_SIZE + total_output_size) * WITNESS_SCALE_FACTOR as u64);
+		let target_amount_sat = must_pay_to.iter().map(|output| output.value).sum();
+		let do_coin_selection = |force_conflicting_utxo_spend: bool, tolerate_high_network_feerates: bool| {
+			self.select_confirmed_utxos_internal(
+				&utxos, claim_id, force_conflicting_utxo_spend, tolerate_high_network_feerates,
+				target_feerate_sat_per_1000_weight, preexisting_tx_weight, target_amount_sat,
+			)
+		};
+		// Escalate through increasingly aggressive strategies: first avoid conflicting spends
+		// and low-contribution UTXOs, then progressively relax each constraint in turn.
+		do_coin_selection(false, false)
+			.or_else(|_| do_coin_selection(false, true))
+			.or_else(|_| do_coin_selection(true, false))
+			.or_else(|_| do_coin_selection(true, true))
+	}
+
+	// Defer signing entirely to the underlying wallet source.
+	fn sign_tx(&self, tx: &mut Transaction) -> Result<(), ()> {
+		self.source.sign_tx(tx)
+	}
+}
+
+/// A handler for [`Event::BumpTransaction`] events that sources confirmed UTXOs from a
+/// [`CoinSelectionSource`] to fee bump transactions via Child-Pays-For-Parent (CPFP) or
+/// Replace-By-Fee (RBF).
+pub struct BumpTransactionEventHandler<B: Deref, C: Deref, SP: Deref, L: Deref>
+where
+	B::Target: BroadcasterInterface,
+	C::Target: CoinSelectionSource,
+	SP::Target: SignerProvider,
+	L::Target: Logger,
+{
+	// Broadcasts the resulting fully-signed fee-bumping transactions.
+	broadcaster: B,
+	// Provides and signs for the additional UTXOs used to fund fee bumps.
+	utxo_source: C,
+	// NOTE(review): presumably used to derive channel signers for channel-owned inputs —
+	// its usage lies outside this chunk.
+	signer_provider: SP,
+	logger: L,
+	// Shared secp256k1 context for all signing operations performed by the handler.
+	secp: Secp256k1<secp256k1::All>,
+}
+
+impl<B: Deref, C: Deref, SP: Deref, L: Deref> BumpTransactionEventHandler<B, C, SP, L>
+where
+ B::Target: BroadcasterInterface,
+ C::Target: CoinSelectionSource,
+ SP::Target: SignerProvider,
+ L::Target: Logger,
+{
+	/// Returns a new instance capable of handling [`Event::BumpTransaction`] events.
+	pub fn new(broadcaster: B, utxo_source: C, signer_provider: SP, logger: L) -> Self {
+		Self {
+			broadcaster,
+			utxo_source,
+			signer_provider,
+			logger,
+			// A fresh all-capabilities context, created once and reused for every signature.
+			secp: Secp256k1::new(),
+		}
+	}
+
+	/// Updates a transaction with the result of a successful coin selection attempt.
+	fn process_coin_selection(&self, tx: &mut Transaction, mut coin_selection: CoinSelection) {
+		// Append each selected UTXO as an input. The script_sig/witness are left empty here and
+		// are filled in later by the coin selection source's `sign_tx`.
+		for utxo in coin_selection.confirmed_utxos.drain(..) {
+			tx.input.push(TxIn {
+				previous_output: utxo.outpoint,
+				script_sig: Script::new(),
+				// A zero sequence is below 0xfffffffe, so the input signals BIP 125
+				// replaceability — presumably intended for later fee bumps (confirm).
+				sequence: Sequence::ZERO,
+				witness: Witness::new(),
+			});
+		}
+		if let Some(change_output) = coin_selection.change_output.take() {
+			tx.output.push(change_output);
+		} else if tx.output.is_empty() {
+			// We weren't provided a change output, likely because the input set was a perfect
+			// match, but we still need to have at least one output in the transaction for it to be
+			// considered standard. We choose to go with an empty OP_RETURN as it is the cheapest
+			// way to include a dummy output.
+			tx.output.push(TxOut {
+				value: 0,
+				script_pubkey: Script::new_op_return(&[]),
+			});
+		}
+	}
+
+ /// Returns an unsigned transaction spending an anchor output of the commitment transaction, and
+ /// any additional UTXOs sourced, to bump the commitment transaction's fee.
+ fn build_anchor_tx(
+ &self, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32,
+ commitment_tx: &Transaction, anchor_descriptor: &AnchorDescriptor,
+ ) -> Result<Transaction, ()> {
+ let must_spend = vec![Input {
+ outpoint: anchor_descriptor.outpoint,
+ satisfaction_weight: commitment_tx.weight() as u64 + ANCHOR_INPUT_WITNESS_WEIGHT + EMPTY_SCRIPT_SIG_WEIGHT,
+ }];
+ let coin_selection = self.utxo_source.select_confirmed_utxos(
+ claim_id, &must_spend, &[], target_feerate_sat_per_1000_weight,
+ )?;
+
+ let mut tx = Transaction {
+ version: 2,
+ lock_time: PackedLockTime::ZERO, // TODO: Use next best height.
+ input: vec![TxIn {
+ previous_output: anchor_descriptor.outpoint,
+ script_sig: Script::new(),
+ sequence: Sequence::ZERO,
+ witness: Witness::new(),
+ }],
+ output: vec![],
+ };
+ self.process_coin_selection(&mut tx, coin_selection);
+ Ok(tx)
+ }
+
+ /// Handles a [`BumpTransactionEvent::ChannelClose`] event variant by producing a fully-signed
+ /// transaction spending an anchor output of the commitment transaction to bump its fee and
+ /// broadcasts them to the network as a package.
+ fn handle_channel_close(
+ &self, claim_id: ClaimId, package_target_feerate_sat_per_1000_weight: u32,
+ commitment_tx: &Transaction, commitment_tx_fee_sat: u64, anchor_descriptor: &AnchorDescriptor,
+ ) -> Result<(), ()> {
+ // Compute the feerate the anchor transaction must meet to meet the overall feerate for the
+ // package (commitment + anchor transactions).
+ let commitment_tx_sat_per_1000_weight: u32 = compute_feerate_sat_per_1000_weight(
+ commitment_tx_fee_sat, commitment_tx.weight() as u64,
+ );
+ if commitment_tx_sat_per_1000_weight >= package_target_feerate_sat_per_1000_weight {
+ // If the commitment transaction already has a feerate high enough on its own, broadcast
+ // it as is without a child.
+ self.broadcaster.broadcast_transactions(&[&commitment_tx]);
+ return Ok(());
+ }
+
+ let mut anchor_tx = self.build_anchor_tx(
+ claim_id, package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_descriptor,
+ )?;
+ debug_assert_eq!(anchor_tx.output.len(), 1);
+
+ self.utxo_source.sign_tx(&mut anchor_tx)?;
+ let signer = self.signer_provider.derive_channel_signer(
+ anchor_descriptor.channel_value_satoshis, anchor_descriptor.channel_keys_id,
+ );
+ let anchor_sig = signer.sign_holder_anchor_input(&anchor_tx, 0, &self.secp)?;
+ anchor_tx.input[0].witness =
+ chan_utils::build_anchor_input_witness(&signer.pubkeys().funding_pubkey, &anchor_sig);
+
+ self.broadcaster.broadcast_transactions(&[&commitment_tx, &anchor_tx]);
+ Ok(())
+ }
+
+ /// Returns an unsigned, fee-bumped HTLC transaction, along with the set of signers required to
+ /// fulfill the witness for each HTLC input within it.
+ fn build_htlc_tx(
+ &self, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32,
+ htlc_descriptors: &[HTLCDescriptor], tx_lock_time: PackedLockTime,
+ ) -> Result<(Transaction, HashMap<[u8; 32], <SP::Target as SignerProvider>::Signer>), ()> {
+ let mut tx = Transaction {
+ version: 2,
+ lock_time: tx_lock_time,
+ input: vec![],
+ output: vec![],
+ };
+ // Unfortunately, we need to derive the signer for each HTLC ahead of time to obtain its
+ // input.
+ let mut signers = HashMap::new();
+ let mut must_spend = Vec::with_capacity(htlc_descriptors.len());
+ for htlc_descriptor in htlc_descriptors {
+ let signer = signers.entry(htlc_descriptor.channel_keys_id)
+ .or_insert_with(||
+ self.signer_provider.derive_channel_signer(
+ htlc_descriptor.channel_value_satoshis, htlc_descriptor.channel_keys_id,
+ )
+ );
+ let per_commitment_point = signer.get_per_commitment_point(
+ htlc_descriptor.per_commitment_number, &self.secp
+ );
+
+ let htlc_input = htlc_descriptor.unsigned_tx_input();
+ must_spend.push(Input {
+ outpoint: htlc_input.previous_output.clone(),
+ satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + if htlc_descriptor.preimage.is_some() {
+ HTLC_SUCCESS_INPUT_ANCHOR_WITNESS_WEIGHT
+ } else {
+ HTLC_TIMEOUT_INPUT_ANCHOR_WITNESS_WEIGHT
+ },
+ });
+ tx.input.push(htlc_input);
+ let htlc_output = htlc_descriptor.tx_output(&per_commitment_point, &self.secp);
+ tx.output.push(htlc_output);
+ }
+
+ let coin_selection = self.utxo_source.select_confirmed_utxos(
+ claim_id, &must_spend, &tx.output, target_feerate_sat_per_1000_weight,
+ )?;
+ self.process_coin_selection(&mut tx, coin_selection);
+ Ok((tx, signers))
+ }
+
+ /// Handles a [`BumpTransactionEvent::HTLCResolution`] event variant by producing a
+ /// fully-signed, fee-bumped HTLC transaction that is broadcast to the network.
+ fn handle_htlc_resolution(
+ &self, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32,
+ htlc_descriptors: &[HTLCDescriptor], tx_lock_time: PackedLockTime,
+ ) -> Result<(), ()> {
+ let (mut htlc_tx, signers) = self.build_htlc_tx(
+ claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, tx_lock_time,
+ )?;
+
+ self.utxo_source.sign_tx(&mut htlc_tx)?;
+ for (idx, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
+ let signer = signers.get(&htlc_descriptor.channel_keys_id).unwrap();
+ let htlc_sig = signer.sign_holder_htlc_transaction(
+ &htlc_tx, idx, htlc_descriptor, &self.secp
+ )?;
+ let per_commitment_point = signer.get_per_commitment_point(
+ htlc_descriptor.per_commitment_number, &self.secp
+ );
+ let witness_script = htlc_descriptor.witness_script(&per_commitment_point, &self.secp);
+ htlc_tx.input[idx].witness = htlc_descriptor.tx_input_witness(&htlc_sig, &witness_script);
+ }
+
+ self.broadcaster.broadcast_transactions(&[&htlc_tx]);
+ Ok(())
+ }
+
+ /// Handles all variants of [`BumpTransactionEvent`], immediately returning otherwise.
+ pub fn handle_event(&self, event: &Event) {
+ let event = if let Event::BumpTransaction(event) = event {
+ event
+ } else {
+ return;
+ };
+ match event {
+ BumpTransactionEvent::ChannelClose {
+ claim_id, package_target_feerate_sat_per_1000_weight, commitment_tx,
+ anchor_descriptor, commitment_tx_fee_satoshis, ..
+ } => {
+ if let Err(_) = self.handle_channel_close(
+ *claim_id, *package_target_feerate_sat_per_1000_weight, commitment_tx,
+ *commitment_tx_fee_satoshis, anchor_descriptor,
+ ) {
+ log_error!(self.logger, "Failed bumping commitment transaction fee for {}",
+ commitment_tx.txid());
+ }
+ }
+ BumpTransactionEvent::HTLCResolution {
+ claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, tx_lock_time,
+ } => {
+ if let Err(_) = self.handle_htlc_resolution(
+ *claim_id, *target_feerate_sat_per_1000_weight, htlc_descriptors, *tx_lock_time,
+ ) {
+ log_error!(self.logger, "Failed bumping HTLC transaction fee for commitment {}",
+ htlc_descriptors[0].commitment_txid);
+ }
+ }
+ }
+ }
+}