// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! The logic to build claims and bump in-flight transactions until they confirm.
//!
//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all the
//! transaction building, tracking, bumping and notification functions.
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;

use bitcoin::hash_types::Txid;

use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::msgs::DecodeError;
use crate::ln::PaymentPreimage;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
use crate::chain::chaininterface::ConfirmationTarget;
use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
use crate::chain::keysinterface::{Sign, KeysInterface};
use crate::chain::package::PackageSolvingData;
use crate::chain::package::PackageTemplate;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, VecWriter};
use crate::util::byte_utils;
use crate::io;

use crate::prelude::*;
use alloc::collections::BTreeMap;
use core::cmp;
use core::ops::Deref;
use core::mem::replace;
use core::mem::swap;

use bitcoin::hashes::Hash;
const MAX_ALLOC_SIZE: usize = 64*1024;
/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
/// transaction causing it.
///
/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
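///
/// As an illustration (assuming an [`ANTI_REORG_DELAY`] of 6 blocks), an event observed at
/// height 100 reaches its confirmation threshold at height 100 + 6 - 1 = 105, after which we
/// treat it as safe from reorgs.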
#[derive(PartialEq, Eq)]
struct OnchainEventEntry {
	txid: Txid,
	height: u32,
	event: OnchainEvent,
}

impl OnchainEventEntry {
	fn confirmation_threshold(&self) -> u32 {
		self.height + ANTI_REORG_DELAY - 1
	}

	fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
		height >= self.confirmation_threshold()
	}
}
/// Upon discovery of some classes of onchain transactions by a ChannelMonitor, we may have to
/// take action on them once they mature to enough confirmations (ANTI_REORG_DELAY).
#[derive(PartialEq, Eq)]
enum OnchainEvent {
	/// An outpoint under claim by one of our own transactions; once that transaction gets enough
	/// confirmations, we remove the outpoint from the bump-txn candidate buffer.
	Claim {
		claim_request: Txid,
	},
	/// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed
	/// by a counterparty transaction instead. In this case, we need to drop the outpoint and
	/// regenerate a new claim tx. For safety, we keep tracking the outpoint so that we are sure to
	/// resurrect it back into the claim tx if reorgs happen.
	ContentiousOutpoint {
		package: PackageTemplate,
	},
}
impl Writeable for OnchainEventEntry {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(writer, {
			(0, self.txid, required),
			(2, self.height, required),
			(4, self.event, required),
		});
		Ok(())
	}
}

impl MaybeReadable for OnchainEventEntry {
	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		let mut txid = Txid::all_zeros();
		let mut height = 0;
		let mut event = None;
		read_tlv_fields!(reader, {
			(0, txid, required),
			(2, height, required),
			(4, event, ignorable),
		});
		if let Some(ev) = event {
			Ok(Some(Self { txid, height, event: ev }))
		} else {
			Ok(None)
		}
	}
}
impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
	(0, Claim) => {
		(0, claim_request, required),
	},
	(1, ContentiousOutpoint) => {
		(0, package, required),
	},
);
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		match Readable::read(reader)? {
			0u8 => Ok(None),
			1u8 => {
				let vlen: u64 = Readable::read(reader)?;
				let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
				for _ in 0..vlen {
					ret.push(match Readable::read(reader)? {
						0u8 => None,
						1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
						_ => return Err(DecodeError::InvalidValue)
					});
				}
				Ok(Some(ret))
			},
			_ => Err(DecodeError::InvalidValue),
		}
	}
}
impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		match self {
			&Some(ref vec) => {
				1u8.write(writer)?;
				(vec.len() as u64).write(writer)?;
				for opt in vec.iter() {
					match opt {
						&Some((ref idx, ref sig)) => {
							1u8.write(writer)?;
							(*idx as u64).write(writer)?;
							sig.write(writer)?;
						},
						&None => 0u8.write(writer)?,
					}
				}
			},
			&None => 0u8.write(writer)?,
		}
		Ok(())
	}
}
// Represents the different types of claims for which events are yielded externally to satisfy
// said claims.
pub(crate) enum ClaimEvent {
	/// Event yielded to signal that the commitment transaction fee must be bumped to claim any
	/// encumbered funds and proceed to HTLC resolution, if any HTLCs exist.
	BumpCommitment {
		package_target_feerate_sat_per_1000_weight: u32,
		commitment_tx: Transaction,
		anchor_output_idx: u32,
	},
}
/// Represents the different ways an output can be claimed (i.e., spent to an address under our
/// control) onchain.
pub(crate) enum OnchainClaim {
	/// A finalized transaction pending confirmation spending the output to claim.
	Tx(Transaction),
	/// An event yielded externally to signal additional inputs must be added to a transaction
	/// pending confirmation spending the output to claim.
	Event(ClaimEvent),
}
/// OnchainTxHandler receives claiming requests, aggregates them if doing so is sound, broadcasts
/// the resulting transactions and does RBF bumping if possible.
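///
/// A rough sketch of the expected call flow, for orientation only (in practice the containing
/// ChannelMonitor drives these calls; local variable names here are illustrative):
///
/// ```ignore
/// let mut handler = OnchainTxHandler::new(destination_script, signer, channel_parameters,
/// 	holder_commitment, secp_ctx);
/// // On each connected block, hand over matched transactions and any new claim requests:
/// handler.update_claims_view(&txn_matched, requests, conf_height, cur_height, &broadcaster,
/// 	&fee_estimator, &logger);
/// // On a reorg, roll back events observed at or above the disconnected height:
/// handler.block_disconnected(height, broadcaster, &fee_estimator, logger);
/// // Claims needing external (anchor) funding are surfaced as events:
/// let claim_events = handler.get_and_clear_pending_claim_events();
/// ```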
pub struct OnchainTxHandler<ChannelSigner: Sign> {
	destination_script: Script,
	holder_commitment: HolderCommitmentTransaction,
	// holder_htlc_sigs and prev_holder_htlc_sigs are stored in the same order as they appear in the
	// commitment transaction outputs (hence the Option<>s inside the Vec). The first usize is the
	// index into the set of HTLCs in the HolderCommitmentTransaction.
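	// For example (illustrative values only): if the holder commitment has HTLC outputs at
	// transaction output indices 2 and 4, this might hold
	// `vec![None, None, Some((0, sig_a)), None, Some((1, sig_b))]`, where 0 and 1 are indices
	// into the HolderCommitmentTransaction's HTLC set.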
	holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
	prev_holder_commitment: Option<HolderCommitmentTransaction>,
	prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,

	pub(super) signer: ChannelSigner,
	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,

	// Used to track claiming requests. If a claim tx doesn't confirm before the height timer expires
	// we need to bump it (RBF or CPFP). If an input has been part of an aggregate tx at the first
	// claim try, we need to keep it within another bumped aggregate tx to comply with RBF rules. We
	// may have multiple claiming txn in flight for the same set of outpoints. One of the outpoints
	// may be spent by a transaction not issued by us. That's why at block connection we scan all
	// inputs and, if any of them belongs to the set of a claiming request, we test for set equality
	// between the spending transaction and the claim request. If they are equal, the transaction was
	// one of our claiming transactions and, after a security delay of 6 blocks, we remove the pending
	// claim request. If not, the transaction wasn't ours and we need to regenerate a new claim
	// request with the reduced set of still-claimable outpoints.
	// The key is the identifier of the pending claim request, i.e. the txid of the initial claiming
	// transaction generated by us, and is immutable until all outpoints of the claimable set are
	// post-anti-reorg-delay solved.
	// The entry is a cache of the elements needed to generate a bumped claiming transaction (see
	// ClaimTxBumpMaterial).
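	// Illustrative shape: pending_claim_requests[initial_claim_txid] is the PackageTemplate
	// covering all outpoints we are still trying to claim via that (possibly since-bumped)
	// transaction.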
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Txid, PackageTemplate>,

	pending_claim_events: HashMap<Txid, ClaimEvent>,

	// Used to link outpoints claimed in a connected block to a pending claim request.
	// The key is an outpoint that monitor parsing has detected we have keys/scripts to claim.
	// The value is (pending claim request identifier, confirmation_block): the identifier
	// is the txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved, and confirmation_block is used to erase the entry if the
	// block with the output gets disconnected.
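	// Illustrative shape: claimable_outpoints[outpoint] = (initial_claim_txid, confirmation_block).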
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,

	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,

	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,

	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
}
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.destination_script.write(writer)?;
		self.holder_commitment.write(writer)?;
		self.holder_htlc_sigs.write(writer)?;
		self.prev_holder_commitment.write(writer)?;
		self.prev_holder_htlc_sigs.write(writer)?;

		self.channel_transaction_parameters.write(writer)?;

		let mut key_data = VecWriter(Vec::new());
		self.signer.write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
		for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
			ancestor_claim_txid.write(writer)?;
			request.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
			outp.write(writer)?;
			claim_and_height.0.write(writer)?;
			claim_and_height.1.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.locktimed_packages.len() as u64))?;
		for (ref locktime, ref packages) in self.locktimed_packages.iter() {
			locktime.write(writer)?;
			writer.write_all(&byte_utils::be64_to_array(packages.len() as u64))?;
			for ref package in packages.iter() {
				package.write(writer)?;
			}
		}

		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
			entry.write(writer)?;
		}

		write_tlv_fields!(writer, {});
		Ok(())
	}
}
impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
	fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);

		let destination_script = Readable::read(reader)?;

		let holder_commitment = Readable::read(reader)?;
		let holder_htlc_sigs = Readable::read(reader)?;
		let prev_holder_commitment = Readable::read(reader)?;
		let prev_holder_htlc_sigs = Readable::read(reader)?;

		let channel_parameters = Readable::read(reader)?;

		let keys_len: u32 = Readable::read(reader)?;
		let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
		while keys_data.len() != keys_len as usize {
			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
			let mut data = [0; 1024];
			let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
			reader.read_exact(read_slice)?;
			keys_data.extend_from_slice(read_slice);
		}
		let signer = keys_manager.read_chan_signer(&keys_data)?;

		let pending_claim_requests_len: u64 = Readable::read(reader)?;
		let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..pending_claim_requests_len {
			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
		}

		let claimable_outpoints_len: u64 = Readable::read(reader)?;
		let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..claimable_outpoints_len {
			let outpoint = Readable::read(reader)?;
			let ancestor_claim_txid = Readable::read(reader)?;
			let height = Readable::read(reader)?;
			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
		}

		let locktimed_packages_len: u64 = Readable::read(reader)?;
		let mut locktimed_packages = BTreeMap::new();
		for _ in 0..locktimed_packages_len {
			let locktime = Readable::read(reader)?;
			let packages_len: u64 = Readable::read(reader)?;
			let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / core::mem::size_of::<PackageTemplate>()));
			for _ in 0..packages_len {
				packages.push(Readable::read(reader)?);
			}
			locktimed_packages.insert(locktime, packages);
		}

		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
		let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..waiting_threshold_conf_len {
			if let Some(val) = MaybeReadable::read(reader)? {
				onchain_events_awaiting_threshold_conf.push(val);
			}
		}

		read_tlv_fields!(reader, {});

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());

		Ok(OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs,
			prev_holder_commitment,
			prev_holder_htlc_sigs,
			signer,
			channel_transaction_parameters: channel_parameters,
			claimable_outpoints,
			locktimed_packages,
			pending_claim_requests,
			onchain_events_awaiting_threshold_conf,
			secp_ctx,
			pending_claim_events: HashMap::new(),
		})
	}
}
impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
		OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs: None,
			prev_holder_commitment: None,
			prev_holder_htlc_sigs: None,
			signer,
			channel_transaction_parameters: channel_parameters,
			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),
			locktimed_packages: BTreeMap::new(),
			onchain_events_awaiting_threshold_conf: Vec::new(),
			pending_claim_events: HashMap::new(),
			secp_ctx,
		}
	}
	pub(crate) fn get_prev_holder_commitment_to_self_value(&self) -> Option<u64> {
		self.prev_holder_commitment.as_ref().map(|commitment| commitment.to_broadcaster_value_sat())
	}

	pub(crate) fn get_cur_holder_commitment_to_self_value(&self) -> u64 {
		self.holder_commitment.to_broadcaster_value_sat()
	}

	pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
		let mut ret = HashMap::new();
		swap(&mut ret, &mut self.pending_claim_events);
		ret.into_iter().map(|(_, event)| event).collect::<Vec<_>>()
	}
	/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a
	/// counterparty onchain) relies on the assumption that claim transactions get confirmed before
	/// timelock expiration (CSV or CLTV, depending on the case). In case of high-fee spikes, a
	/// claim tx may get stuck in the mempool, so we need to bump its feerate quickly using
	/// Replace-By-Fee or Child-Pays-For-Parent.
	///
	/// Panics if there are signing errors, because signing operations in reaction to on-chain
	/// events are not expected to fail, and if they do, we may lose funds.
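	///
	/// Returns `None` when no claim could or needed to be generated. Otherwise the tuple is,
	/// roughly: the block height (if any) at which to next consider bumping the claim, the feerate
	/// applied to it, and the claim itself (either a finalized transaction or an externally-handled
	/// event).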
	fn generate_claim<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(Option<u32>, u64, OnchainClaim)>
		where F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		let request_outpoints = cached_request.outpoints();
		if request_outpoints.is_empty() {
			// Don't prune the pending claiming request yet, we may have to resurrect HTLCs. Untractable
			// packages cannot be aggregated and will never be split, so we cannot end up with an
			// empty claim.
			debug_assert!(cached_request.is_malleable());
			return None;
		}
		// If we've seen transaction inclusion in the chain for all outpoints in our request, we
		// don't need to continue generating more claims. We'll keep tracking the request to fully
		// remove it once it reaches the confirmation threshold, or to generate a new claim if the
		// transaction is reorged out.
		let mut all_inputs_have_confirmed_spend = true;
		for outpoint in &request_outpoints {
			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
				// We check for outpoint spends within claims individually rather than as a set
				// since requests can have outpoints split off.
				if !self.onchain_events_awaiting_threshold_conf.iter()
					.any(|event_entry| if let OnchainEvent::Claim { claim_request } = event_entry.event {
						first_claim_txid_height.0 == claim_request
					} else {
						// The onchain event is not a claim, keep seeking until we find one.
						false
					})
				{
					// Either we had no `OnchainEvent::Claim`, or we did but none matched the
					// outpoint's registered spend.
					all_inputs_have_confirmed_spend = false;
				}
			} else {
				// The request's outpoint spend does not exist yet.
				all_inputs_have_confirmed_spend = false;
			}
		}
		if all_inputs_have_confirmed_spend {
			return None;
		}

		// Compute a new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
		let new_timer = Some(cached_request.get_height_timer(cur_height));
		if cached_request.is_malleable() {
			let predicted_weight = cached_request.package_weight(&self.destination_script);
			if let Some((output_value, new_feerate)) =
					cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().to_sat(), fee_estimator, logger) {
				assert!(new_feerate != 0);

				let transaction = cached_request.finalize_malleable_package(self, output_value, self.destination_script.clone(), logger).unwrap();
				log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
				assert!(predicted_weight >= transaction.weight());
				return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)))
			}
		} else {
			// Untractable packages cannot have their fees bumped through Replace-By-Fee. Some
			// packages may support fee bumping through Child-Pays-For-Parent, indicated by those
			// which require external funding.
			let inputs = cached_request.inputs();
			let mut inputs = cached_request.inputs();
			debug_assert_eq!(inputs.len(), 1);
			let tx = match cached_request.finalize_untractable_package(self, logger) {
				Some(tx) => tx,
				None => return None,
			};
			if !cached_request.requires_external_funding() {
				return Some((None, 0, OnchainClaim::Tx(tx)));
			}
			return inputs.find_map(|input| match input {
				// Commitment inputs with anchors support are the only untractable inputs supported
				// thus far that require external funding.
				PackageSolvingData::HolderFundingOutput(..) => {
					debug_assert_eq!(tx.txid(), self.holder_commitment.trust().txid(),
						"Holder commitment transaction mismatch");
					// We'll locate an anchor output we can spend within the commitment transaction.
					let funding_pubkey = &self.channel_transaction_parameters.holder_pubkeys.funding_pubkey;
					match chan_utils::get_anchor_output(&tx, funding_pubkey) {
						// An anchor output was found, so we should yield a funding event externally.
						Some((idx, _)) => {
							// TODO: Use a lower confirmation target when both our and the
							// counterparty's latest commitment don't have any HTLCs present.
							let conf_target = ConfirmationTarget::HighPriority;
							let package_target_feerate_sat_per_1000_weight = cached_request
								.compute_package_feerate(fee_estimator, conf_target);
							Some((
								new_timer,
								package_target_feerate_sat_per_1000_weight as u64,
								OnchainClaim::Event(ClaimEvent::BumpCommitment {
									package_target_feerate_sat_per_1000_weight,
									commitment_tx: tx.clone(),
									anchor_output_idx: idx,
								}),
							))
						},
						// An anchor output was not found. There's nothing we can do other than
						// attempt to broadcast the transaction with its current fee rate and hope
						// it confirms. This is essentially the same behavior as a commitment
						// transaction without anchor outputs.
						None => Some((None, 0, OnchainClaim::Tx(tx.clone()))),
					}
				},
				_ => {
					debug_assert!(false, "Only HolderFundingOutput inputs should be untractable and require external funding");
					None
				},
			});
		}
		None
	}
	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
	/// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
	/// if we receive a preimage after force-close.
	/// `conf_height` represents the height at which the transactions in `txn_matched` were
	/// confirmed. This does not need to equal the current blockchain tip height, which should be
	/// provided via `cur_height`, however it must never be higher than `cur_height`.
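	///
	/// For example, when a transaction that confirmed at height 100 is being (re-)processed while
	/// the chain tip is at height 105, `conf_height` is 100 and `cur_height` is 105.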
	pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		where B::Target: BroadcasterInterface,
		      F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {} and {} claim requests", cur_height, txn_matched.len(), conf_height, requests.len());
		let mut preprocessed_requests = Vec::with_capacity(requests.len());
		let mut aggregated_request = None;

		// Try to aggregate outputs if their timelock expiration isn't imminent (i.e. the package
		// timelock is more than CLTV_SHARED_CLAIM_BUFFER blocks away) and they don't require an
		// immediate nLockTime (aggregable).
		for req in requests {
			// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
				log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
			} else {
				let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
					.find(|locked_package| locked_package.outpoints() == req.outpoints());
				if let Some(package) = timelocked_equivalent_package {
					log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
						req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
					continue;
				}

				if req.package_timelock() > cur_height + 1 {
					log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), cur_height);
					for outpoint in req.outpoints() {
						log_info!(logger, " Outpoint {}", outpoint);
					}
					self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
					continue;
				}

				log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
				if req.timelock() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
					// Don't aggregate if the outpoint package timelock is soon or marked as non-aggregable
					preprocessed_requests.push(req);
				} else if aggregated_request.is_none() {
					aggregated_request = Some(req);
				} else {
					aggregated_request.as_mut().unwrap().merge_package(req);
				}
			}
		}
		if let Some(req) = aggregated_request {
			preprocessed_requests.push(req);
		}

		// Claim everything up to and including cur_height + 1
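		// For example, with cur_height = 100: packages locked until height 101 or lower are
		// released for claiming now, while packages locked at height 102 or above stay in
		// locktimed_packages.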
		let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 2));
		for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
			log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
			preprocessed_requests.append(&mut entry);
		}
		self.locktimed_packages = remaining_locked_packages;
		// Generate claim transactions and track them to bump if necessary at
		// height timer expiration (i.e. in how many blocks we're going to take action).
		for mut req in preprocessed_requests {
			if let Some((new_timer, new_feerate, claim)) = self.generate_claim(cur_height, &req, &*fee_estimator, &*logger) {
				req.set_timer(new_timer);
				req.set_feerate(new_feerate);
				let txid = match claim {
					OnchainClaim::Tx(tx) => {
						log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
						broadcaster.broadcast_transaction(&tx);
						tx.txid()
					},
					OnchainClaim::Event(claim_event) => {
						log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints());
						let txid = match claim_event {
							ClaimEvent::BumpCommitment { ref commitment_tx, .. } => commitment_tx.txid(),
						};
						self.pending_claim_events.insert(txid, claim_event);
						txid
					},
				};
				for k in req.outpoints() {
					log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
					self.claimable_outpoints.insert(k.clone(), (txid, conf_height));
				}
				self.pending_claim_requests.insert(txid, req);
			}
		}
		let mut bump_candidates = HashMap::new();
		for tx in txn_matched {
			// Scan all inputs to check whether one of the spent outpoints is of interest to us
			let mut claimed_outputs_material = Vec::new();
			for inp in &tx.input {
				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
					// If the outpoint has a claim request pending on it...
					if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
						//... we need to verify equality between transaction outpoints and claim request
						// outpoints to know if the transaction is the original claim or a bumped one issued
						// by us.
						let mut set_equality = true;
						if request.outpoints().len() != tx.input.len() {
							set_equality = false;
						} else {
							for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
								if **claim_inp != tx_inp.previous_output {
									set_equality = false;
									break;
								}
							}
						}

						macro_rules! clean_claim_request_after_safety_delay {
							() => {
								let entry = OnchainEventEntry {
									txid: tx.txid(),
									height: conf_height,
									event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
								};
								if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
									self.onchain_events_awaiting_threshold_conf.push(entry);
								}
							}
						}

						// If this is our transaction (or our counterparty spent all the outputs
						// before we could anyway, with the same input order as ours), wait for
						// ANTI_REORG_DELAY and clean the RBF tracking map.
						if set_equality {
							clean_claim_request_after_safety_delay!();
						} else { // If not, generate a new claim request with the updated outpoint set
							let mut at_least_one_drop = false;
							for input in tx.input.iter() {
								if let Some(package) = request.split_package(&input.previous_output) {
									claimed_outputs_material.push(package);
									at_least_one_drop = true;
								}
								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
								if request.outpoints().is_empty() {
									clean_claim_request_after_safety_delay!();
								}
							}
							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
							if at_least_one_drop {
								bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
							}
						}
						break; //No need to iterate further, either the tx is ours or theirs
					} else {
						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
					}
				}
			}
			for package in claimed_outputs_material.drain(..) {
				let entry = OnchainEventEntry {
					txid: tx.txid(),
					height: conf_height,
					event: OnchainEvent::ContentiousOutpoint { package },
				};
				if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
					self.onchain_events_awaiting_threshold_conf.push(entry);
				}
			}
		}
		// After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
		let onchain_events_awaiting_threshold_conf =
			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
		for entry in onchain_events_awaiting_threshold_conf {
			if entry.has_reached_confirmation_threshold(cur_height) {
				match entry.event {
					OnchainEvent::Claim { claim_request } => {
						// We may remove a whole set of claim outpoints here, as they may have
						// been aggregated in a single tx and thus claimed atomically
						if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
							for outpoint in request.outpoints() {
								log_debug!(logger, "Removing claim tracking for {} due to maturation of claim tx {}.", outpoint, claim_request);
								self.claimable_outpoints.remove(&outpoint);
							}
							self.pending_claim_events.remove(&claim_request);
						}
					},
					OnchainEvent::ContentiousOutpoint { package } => {
						log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
						log_debug!(logger, " {:?}", package.outpoints());
						self.claimable_outpoints.remove(&package.outpoints()[0]);
					}
				}
			} else {
				self.onchain_events_awaiting_threshold_conf.push(entry);
			}
		}
		// Check if any pending claim request must be rescheduled
		for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
			if let Some(h) = request.timer() {
				if cur_height >= h {
					bump_candidates.insert(*first_claim_txid, (*request).clone());
				}
			}
		}

		// Build, bump and rebroadcast tx accordingly
		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
		for (first_claim_txid, request) in bump_candidates.iter() {
			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
				match bump_claim {
					OnchainClaim::Tx(bump_tx) => {
						log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
						broadcaster.broadcast_transaction(&bump_tx);
					},
					OnchainClaim::Event(claim_event) => {
						log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
						self.pending_claim_events.insert(*first_claim_txid, claim_event);
					},
				}
				if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
					request.set_timer(new_timer);
					request.set_feerate(new_feerate);
				}
			}
		}
	}
	pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
		&mut self,
		txid: &Txid,
		broadcaster: B,
		fee_estimator: &LowerBoundedFeeEstimator<F>,
		logger: L,
	) where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		let mut height = None;
		for entry in self.onchain_events_awaiting_threshold_conf.iter() {
			if entry.txid == *txid {
				height = Some(entry.height);
				break;
			}
		}

		if let Some(height) = height {
			self.block_disconnected(height, broadcaster, fee_estimator, logger);
		}
	}
	pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L)
		where B::Target: BroadcasterInterface,
		      F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		let mut bump_candidates = HashMap::new();
		let onchain_events_awaiting_threshold_conf =
			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
		for entry in onchain_events_awaiting_threshold_conf {
			if entry.height >= height {
				//- our claim tx on a commitment tx output
				//- resurrect the outpoint back into its claimable set and regenerate the tx
				match entry.event {
					OnchainEvent::ContentiousOutpoint { package } => {
						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
							if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
								request.merge_package(package);
								// Using a HashMap guarantees that if we have multiple outpoints getting
								// resurrected, only one bump claim tx is going to be broadcast
								bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
							}
						}
					},
					_ => {},
				}
			} else {
				self.onchain_events_awaiting_threshold_conf.push(entry);
			}
		}
		for (_first_claim_txid_height, request) in bump_candidates.iter_mut() {
			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
				request.set_timer(new_timer);
				request.set_feerate(new_feerate);
				match bump_claim {
					OnchainClaim::Tx(bump_tx) => {
						log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
						broadcaster.broadcast_transaction(&bump_tx);
					},
					OnchainClaim::Event(claim_event) => {
						log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
						self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event);
					},
				}
			}
		}
		for (ancestor_claim_txid, request) in bump_candidates.drain() {
			self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
		}
		//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
		// right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
		let mut remove_request = Vec::new();
		self.claimable_outpoints.retain(|_, ref v|
			if v.1 >= height {
				remove_request.push(v.0.clone());
				false
			} else { true });
		for req in remove_request {
			self.pending_claim_requests.remove(&req);
		}
	}
	pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool {
		self.claimable_outpoints.get(outpoint).is_some()
	}

	pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
		let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
			.iter()
			.map(|entry| entry.txid)
			.collect();
		txids.sort_unstable();
		txids.dedup();
		txids
	}
	pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
		self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
		self.holder_htlc_sigs = None;
	}

	// Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
	// in some configurations, the holder commitment tx has been signed and broadcast by a
	// ChannelMonitor replica, so we handle that case here.
	fn sign_latest_holder_htlcs(&mut self) {
		if self.holder_htlc_sigs.is_none() {
			let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
			self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
		}
	}

	// Normally only the latest commitment tx and HTLCs need to be signed. However, in some
	// configurations we may have updated our holder commitment but a replica of the ChannelMonitor
	// broadcast the previous one before we synced with it. We handle that case here.
	fn sign_prev_holder_htlcs(&mut self) {
		if self.prev_holder_htlc_sigs.is_none() {
			if let Some(ref holder_commitment) = self.prev_holder_commitment {
				let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
				self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
			}
		}
	}

	fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
		let mut ret = Vec::new();
		for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
			let tx_idx = htlc.transaction_output_index.unwrap();
			if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
			ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
		}
		ret
	}
	//TODO: Getting the latest holder transactions should be infallible and result in us "force-closing the channel", but we may
	// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
	// before providing an initial commitment transaction. For an outbound channel, the ChannelMonitor is initialized at Channel::funding_signed;
	// there is nothing to monitor before that.
	pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
		let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
	}

	#[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
	pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
		let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
	}
	pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let mut htlc_tx = None;
		let commitment_txid = self.holder_commitment.trust().txid();
		// Check if the HTLC spends from the current holder commitment
		if commitment_txid == outp.txid {
			self.sign_latest_holder_htlcs();
			if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
				let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
				let trusted_tx = self.holder_commitment.trust();
				let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
				htlc_tx = Some(trusted_tx
					.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
			}
		}
		// If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
		if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
			let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
			if commitment_txid == outp.txid {
				self.sign_prev_holder_htlcs();
				if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
					let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
					let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
					let trusted_tx = holder_commitment.trust();
					let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
					htlc_tx = Some(trusted_tx
						.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
				}
			}
		}
		htlc_tx
	}

	pub(crate) fn opt_anchors(&self) -> bool {
		self.channel_transaction_parameters.opt_anchors.is_some()
	}
	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
	pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let latest_had_sigs = self.holder_htlc_sigs.is_some();
		let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
		let ret = self.get_fully_signed_htlc_tx(outp, preimage);
		if !latest_had_sigs {
			self.holder_htlc_sigs = None;
		}
		if !prev_had_sigs {
			self.prev_holder_htlc_sigs = None;
		}
		ret
	}
}