// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The logic to build claims and bump in-flight transactions until confirmations.
//!
//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all
//! building, tracking, bumping and notification functions.
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;

use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use crate::chain::keysinterface::BaseSign;
use crate::ln::msgs::DecodeError;
use crate::ln::PaymentPreimage;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
use crate::chain::chaininterface::ConfirmationTarget;
use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
use crate::chain::keysinterface::{Sign, KeysInterface};
use crate::chain::package::PackageSolvingData;
use crate::chain::package::PackageTemplate;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, VecWriter};
use crate::util::byte_utils;

use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeMap;
use core::cmp;
use core::ops::Deref;
use core::mem::{replace, swap};

use bitcoin::hashes::Hash;

const MAX_ALLOC_SIZE: usize = 64*1024;
/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
/// transaction causing it.
///
/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
#[derive(PartialEq, Eq)]
struct OnchainEventEntry {
	txid: Txid,
	height: u32,
	block_hash: Option<BlockHash>, // Added as optional, will be filled in for any entry generated on 0.0.113 or after
	event: OnchainEvent,
}

impl OnchainEventEntry {
	fn confirmation_threshold(&self) -> u32 {
		self.height + ANTI_REORG_DELAY - 1
	}

	fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
		height >= self.confirmation_threshold()
	}
}
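// A test-only sketch of the maturity math above: with an ANTI_REORG_DELAY of 6
// blocks, an event observed at height 100 reaches its confirmation threshold
// at height 105, i.e. the block containing it plus five more on top. Module
// name and values here are illustrative, not part of the API.
#[cfg(test)]
mod onchain_event_entry_threshold_tests {
	use super::*;
	use bitcoin::hashes::Hash;

	#[test]
	fn event_matures_at_height_plus_anti_reorg_delay_minus_one() {
		let entry = OnchainEventEntry {
			txid: Txid::all_zeros(),
			height: 100,
			block_hash: None,
			event: OnchainEvent::Claim { claim_request: Txid::all_zeros() },
		};
		assert!(entry.confirmation_threshold() == 100 + ANTI_REORG_DELAY - 1);
		assert!(!entry.has_reached_confirmation_threshold(entry.confirmation_threshold() - 1));
		assert!(entry.has_reached_confirmation_threshold(entry.confirmation_threshold()));
	}
}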
/// Upon discovery of certain classes of onchain txn by ChannelMonitor, we may have to take action
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(PartialEq, Eq)]
enum OnchainEvent {
	/// An outpoint under claim process by our own tx; once it gets enough confirmations, we remove
	/// it from the bump-txn candidate buffer.
	Claim {
		claim_request: Txid,
	},
	/// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed
	/// by a counterparty tx. In this case, we need to drop the outpoint and regenerate a new claim
	/// tx. For safety, we keep tracking the outpoint so we can resurrect it back into the claim tx
	/// if a reorg happens.
	ContentiousOutpoint {
		package: PackageTemplate,
	}
}
impl Writeable for OnchainEventEntry {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(writer, {
			(0, self.txid, required),
			(1, self.block_hash, option),
			(2, self.height, required),
			(4, self.event, required),
		});
		Ok(())
	}
}
impl MaybeReadable for OnchainEventEntry {
	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		let mut txid = Txid::all_zeros();
		let mut height = 0;
		let mut block_hash = None;
		let mut event = None;
		read_tlv_fields!(reader, {
			(0, txid, required),
			(1, block_hash, option),
			(2, height, required),
			(4, event, ignorable),
		});
		if let Some(ev) = event {
			Ok(Some(Self { txid, height, block_hash, event: ev }))
		} else {
			Ok(None)
		}
	}
}
impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
	(0, Claim) => {
		(0, claim_request, required),
	},
	(1, ContentiousOutpoint) => {
		(0, package, required),
	},
);
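// A test-only round-trip sketch of the TLV encoding above: an entry written
// via `write_tlv_fields!` should read back equal through `MaybeReadable`.
// Values are illustrative placeholders.
#[cfg(test)]
mod onchain_event_entry_ser_tests {
	use super::*;
	use bitcoin::hashes::Hash;

	#[test]
	fn onchain_event_entry_round_trips() {
		let entry = OnchainEventEntry {
			txid: Txid::all_zeros(),
			height: 203,
			block_hash: Some(BlockHash::all_zeros()),
			event: OnchainEvent::Claim { claim_request: Txid::all_zeros() },
		};
		let mut writer = VecWriter(Vec::new());
		entry.write(&mut writer).unwrap();
		let read = <OnchainEventEntry as MaybeReadable>::read(&mut &writer.0[..])
			.unwrap().expect("known event variant");
		assert!(read == entry);
	}
}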
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		match Readable::read(reader)? {
			0u8 => Ok(None),
			1u8 => {
				let vlen: u64 = Readable::read(reader)?;
				let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
				for _ in 0..vlen {
					ret.push(match Readable::read(reader)? {
						0u8 => None,
						1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
						_ => return Err(DecodeError::InvalidValue)
					});
				}
				Ok(Some(ret))
			},
			_ => Err(DecodeError::InvalidValue),
		}
	}
}
impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		match self {
			&Some(ref vec) => {
				1u8.write(writer)?;
				(vec.len() as u64).write(writer)?;
				for opt in vec.iter() {
					match opt {
						&Some((ref idx, ref sig)) => {
							1u8.write(writer)?;
							(*idx as u64).write(writer)?;
							sig.write(writer)?;
						},
						&None => 0u8.write(writer)?,
					}
				}
			},
			&None => 0u8.write(writer)?,
		}
		Ok(())
	}
}
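// A test-only sketch of the manual encoding above: `None` is a single 0x00
// byte, `Some(vec)` is 0x01, a big-endian u64 length, then one 0x00/0x01 tag
// per element. Using signature-less entries keeps the example self-contained.
#[cfg(test)]
mod holder_htlc_sigs_ser_tests {
	use super::*;

	#[test]
	fn sig_list_encoding_round_trips() {
		let val: Option<Vec<Option<(usize, Signature)>>> = Some(vec![None, None]);
		let mut writer = VecWriter(Vec::new());
		val.write(&mut writer).unwrap();
		// One outer tag byte, eight length bytes, one tag byte per element.
		assert!(writer.0.len() == 1 + 8 + 2);
		let read: Option<Vec<Option<(usize, Signature)>>> =
			Readable::read(&mut &writer.0[..]).unwrap();
		assert!(read.map(|v| v.len()) == Some(2));
	}
}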
/// Represents the different types of claims for which events are yielded externally to satisfy said
/// claims.
pub(crate) enum ClaimEvent {
	/// Event yielded to signal that the commitment transaction fee must be bumped to claim any
	/// encumbered funds and proceed to HTLC resolution, if any HTLCs exist.
	BumpCommitment {
		package_target_feerate_sat_per_1000_weight: u32,
		commitment_tx: Transaction,
		anchor_output_idx: u32,
	},
}
/// Represents the different ways an output can be claimed (i.e., spent to an address under our
/// control) onchain.
pub(crate) enum OnchainClaim {
	/// A finalized transaction pending confirmation spending the output to claim.
	Tx(Transaction),
	/// An event yielded externally to signal additional inputs must be added to a transaction
	/// pending confirmation spending the output to claim.
	Event(ClaimEvent),
}
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound to do so, broadcasts
/// the resulting transactions and handles RBF bumping if possible.
pub struct OnchainTxHandler<ChannelSigner: Sign> {
	destination_script: Script,
	holder_commitment: HolderCommitmentTransaction,
	// holder_htlc_sigs and prev_holder_htlc_sigs are in the order as they appear in the commitment
	// transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
	// the set of HTLCs in the HolderCommitmentTransaction.
	holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
	prev_holder_commitment: Option<HolderCommitmentTransaction>,
	prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,

	pub(super) signer: ChannelSigner,
	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,

	// Used to track claiming requests. If a claim tx doesn't confirm before the height timer
	// expires we need to bump it (RBF or CPFP). If an input was part of an aggregate tx at the
	// first claim attempt, we need to keep it within another bumped aggregate tx to comply with
	// RBF rules. We may have multiple claiming txn in flight for the same set of outpoints. One
	// of the outpoints may be spent by a transaction not issued by us. That's why at block
	// connection we scan all inputs, and if any of them belongs to the set of a claiming request
	// we test for set equality between the spending transaction and the claim request. If equal,
	// the transaction was one of our claims, and after a security delay of 6 blocks we remove the
	// pending claim request. If not, the transaction wasn't ours and we need to regenerate a new
	// claim request with the reduced set of still-claimable outpoints.
	// The key is the identifier of the pending claim request, i.e. the txid of the initial
	// claiming transaction generated by us, and it is immutable until all outpoints of the
	// claimable set are post-anti-reorg-delay solved.
	// The value is a cache of the elements needed to generate a bumped claiming transaction (see
	// ClaimTxBumpMaterial).
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Txid, PackageTemplate>,

	pending_claim_events: HashMap<Txid, ClaimEvent>,

	// Used to link outpoints claimed in a connected block to a pending claim request; see the
	// test-only sketch after this struct for the relationship between the two maps.
	// The key is an outpoint that monitor parsing has detected we have the keys/scripts to claim.
	// The value is (pending claim request identifier, confirmation_block); the identifier is the
	// txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved, and confirmation_block is used to erase the entry if the
	// block with the output gets disconnected.
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,

	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,

	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,

	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
}
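// A test-only sketch (plain maps, illustrative values) of the indexing
// relationship described above: each tracked outpoint maps back to the txid
// key under which its claim request lives in `pending_claim_requests`, which
// is how a spending input seen at block connection is tied to its request.
#[cfg(test)]
mod claim_tracking_maps_example {
	use super::*;
	use bitcoin::hashes::Hash;

	#[test]
	fn outpoint_links_back_to_claim_request() {
		let first_claim_txid = Txid::all_zeros();
		let conf_height = 100u32;

		let mut claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)> = HashMap::new();
		let mut pending_claim_requests: HashMap<Txid, &str> = HashMap::new();

		let outpoint = BitcoinOutPoint::new(Txid::all_zeros(), 0);
		claimable_outpoints.insert(outpoint, (first_claim_txid, conf_height));
		pending_claim_requests.insert(first_claim_txid, "stand-in for a PackageTemplate");

		// Given a spent outpoint, recover the claim request it belongs to.
		let (request_id, _) = claimable_outpoints.get(&outpoint).unwrap();
		assert!(pending_claim_requests.contains_key(request_id));
	}
}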
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.destination_script.write(writer)?;
		self.holder_commitment.write(writer)?;
		self.holder_htlc_sigs.write(writer)?;
		self.prev_holder_commitment.write(writer)?;
		self.prev_holder_htlc_sigs.write(writer)?;

		self.channel_transaction_parameters.write(writer)?;

		let mut key_data = VecWriter(Vec::new());
		self.signer.write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
		for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
			ancestor_claim_txid.write(writer)?;
			request.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
			outp.write(writer)?;
			claim_and_height.0.write(writer)?;
			claim_and_height.1.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.locktimed_packages.len() as u64))?;
		for (ref locktime, ref packages) in self.locktimed_packages.iter() {
			locktime.write(writer)?;
			writer.write_all(&byte_utils::be64_to_array(packages.len() as u64))?;
			for ref package in packages.iter() {
				package.write(writer)?;
			}
		}

		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
			entry.write(writer)?;
		}

		write_tlv_fields!(writer, {});
		Ok(())
	}
}
impl<'a, K: KeysInterface> ReadableArgs<(&'a K, u64, [u8; 32])> for OnchainTxHandler<K::Signer> {
	fn read<R: io::Read>(reader: &mut R, args: (&'a K, u64, [u8; 32])) -> Result<Self, DecodeError> {
		let keys_manager = args.0;
		let channel_value_satoshis = args.1;
		let channel_keys_id = args.2;

		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);

		let destination_script = Readable::read(reader)?;

		let holder_commitment = Readable::read(reader)?;
		let holder_htlc_sigs = Readable::read(reader)?;
		let prev_holder_commitment = Readable::read(reader)?;
		let prev_holder_htlc_sigs = Readable::read(reader)?;

		let channel_parameters = Readable::read(reader)?;

		// Read the serialized signer bytes, but don't deserialize them, as we'll obtain our signer
		// by re-deriving the private key material.
		let keys_len: u32 = Readable::read(reader)?;
		let mut bytes_read = 0;
		while bytes_read != keys_len as usize {
			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
			let mut data = [0; 1024];
			let bytes_to_read = cmp::min(1024, keys_len as usize - bytes_read);
			let read_slice = &mut data[0..bytes_to_read];
			reader.read_exact(read_slice)?;
			bytes_read += bytes_to_read;
		}

		let mut signer = keys_manager.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		signer.provide_channel_parameters(&channel_parameters);

		let pending_claim_requests_len: u64 = Readable::read(reader)?;
		let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..pending_claim_requests_len {
			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
		}

		let claimable_outpoints_len: u64 = Readable::read(reader)?;
		let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..claimable_outpoints_len {
			let outpoint = Readable::read(reader)?;
			let ancestor_claim_txid = Readable::read(reader)?;
			let height = Readable::read(reader)?;
			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
		}

		let locktimed_packages_len: u64 = Readable::read(reader)?;
		let mut locktimed_packages = BTreeMap::new();
		for _ in 0..locktimed_packages_len {
			let locktime = Readable::read(reader)?;
			let packages_len: u64 = Readable::read(reader)?;
			let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / core::mem::size_of::<PackageTemplate>()));
			for _ in 0..packages_len {
				packages.push(Readable::read(reader)?);
			}
			locktimed_packages.insert(locktime, packages);
		}

		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
		let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..waiting_threshold_conf_len {
			if let Some(val) = MaybeReadable::read(reader)? {
				onchain_events_awaiting_threshold_conf.push(val);
			}
		}

		read_tlv_fields!(reader, {});

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());

		Ok(OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs,
			prev_holder_commitment,
			prev_holder_htlc_sigs,
			signer,
			channel_transaction_parameters: channel_parameters,
			claimable_outpoints,
			locktimed_packages,
			pending_claim_requests,
			onchain_events_awaiting_threshold_conf,
			pending_claim_events: HashMap::new(),
			secp_ctx,
		})
	}
}
impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
		OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs: None,
			prev_holder_commitment: None,
			prev_holder_htlc_sigs: None,
			signer,
			channel_transaction_parameters: channel_parameters,
			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),
			locktimed_packages: BTreeMap::new(),
			onchain_events_awaiting_threshold_conf: Vec::new(),
			pending_claim_events: HashMap::new(),
			secp_ctx,
		}
	}

	pub(crate) fn get_prev_holder_commitment_to_self_value(&self) -> Option<u64> {
		self.prev_holder_commitment.as_ref().map(|commitment| commitment.to_broadcaster_value_sat())
	}

	pub(crate) fn get_cur_holder_commitment_to_self_value(&self) -> u64 {
		self.holder_commitment.to_broadcaster_value_sat()
	}
	pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
		let mut ret = HashMap::new();
		swap(&mut ret, &mut self.pending_claim_events);
		ret.into_iter().map(|(_, event)| event).collect::<Vec<_>>()
	}
	/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a
	/// counterparty onchain) relies on the assumption of claim transactions getting confirmed
	/// before timelock expiration (CSV or CLTV, depending on the case). In case of a high-fee
	/// spike, a claim tx may get stuck in the mempool, so you need to bump its feerate quickly
	/// using Replace-By-Fee or Child-Pays-For-Parent.
	///
	/// Panics if there are signing errors, because signing operations in reaction to on-chain
	/// events are not expected to fail, and if they do, we may lose funds.
	fn generate_claim<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(Option<u32>, u64, OnchainClaim)>
		where F::Target: FeeEstimator,
			L::Target: Logger,
	{
		let request_outpoints = cached_request.outpoints();
		if request_outpoints.is_empty() {
			// Don't prune the pending claiming request yet, we may have to resurrect HTLCs.
			// Untractable packages cannot be aggregated and will never be split, so we cannot end
			// up with an empty claim.
			debug_assert!(cached_request.is_malleable());
			return None;
		}
		// If we've seen transaction inclusion in the chain for all outpoints in our request, we
		// don't need to continue generating more claims. We'll keep tracking the request to fully
		// remove it once it reaches the confirmation threshold, or to generate a new claim if the
		// transaction is reorged out.
		let mut all_inputs_have_confirmed_spend = true;
		for outpoint in &request_outpoints {
			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
				// We check for outpoint spends within claims individually rather than as a set
				// since requests can have outpoints split off.
				if !self.onchain_events_awaiting_threshold_conf.iter()
					.any(|event_entry| if let OnchainEvent::Claim { claim_request } = event_entry.event {
						first_claim_txid_height.0 == claim_request
					} else {
						// The onchain event is not a claim, keep seeking until we find one.
						false
					})
				{
					// Either we had no `OnchainEvent::Claim`, or we did but none matched the
					// outpoint's registered spend.
					all_inputs_have_confirmed_spend = false;
				}
			} else {
				// The request's outpoint spend does not exist yet.
				all_inputs_have_confirmed_spend = false;
			}
		}
		if all_inputs_have_confirmed_spend {
			return None;
		}

		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
		let new_timer = Some(cached_request.get_height_timer(cur_height));
		if cached_request.is_malleable() {
			let predicted_weight = cached_request.package_weight(&self.destination_script);
			if let Some((output_value, new_feerate)) =
					cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().to_sat(), fee_estimator, logger) {
				assert!(new_feerate != 0);

				let transaction = cached_request.finalize_malleable_package(self, output_value, self.destination_script.clone(), logger).unwrap();
				log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
				assert!(predicted_weight >= transaction.weight());
				return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)))
			}
		} else {
			// Untractable packages cannot have their fees bumped through Replace-By-Fee. Some
			// packages may support fee bumping through Child-Pays-For-Parent, indicated by those
			// which require external funding.
			let mut inputs = cached_request.inputs();
			debug_assert_eq!(inputs.len(), 1);
			let tx = match cached_request.finalize_untractable_package(self, logger) {
				Some(tx) => tx,
				None => return None,
			};
			if !cached_request.requires_external_funding() {
				return Some((None, 0, OnchainClaim::Tx(tx)));
			}
			return inputs.find_map(|input| match input {
				// Commitment inputs with anchors support are the only untractable inputs supported
				// thus far that require external funding.
				PackageSolvingData::HolderFundingOutput(..) => {
					debug_assert_eq!(tx.txid(), self.holder_commitment.trust().txid(),
						"Holder commitment transaction mismatch");
					// We'll locate an anchor output we can spend within the commitment transaction.
					let funding_pubkey = &self.channel_transaction_parameters.holder_pubkeys.funding_pubkey;
					match chan_utils::get_anchor_output(&tx, funding_pubkey) {
						// An anchor output was found, so we should yield a funding event externally.
						Some((idx, _)) => {
							// TODO: Use a lower confirmation target when both our and the
							// counterparty's latest commitment don't have any HTLCs present.
							let conf_target = ConfirmationTarget::HighPriority;
							let package_target_feerate_sat_per_1000_weight = cached_request
								.compute_package_feerate(fee_estimator, conf_target);
							Some((
								new_timer,
								package_target_feerate_sat_per_1000_weight as u64,
								OnchainClaim::Event(ClaimEvent::BumpCommitment {
									package_target_feerate_sat_per_1000_weight,
									commitment_tx: tx.clone(),
									anchor_output_idx: idx,
								}),
							))
						},
						// An anchor output was not found. There's nothing we can do other than
						// attempt to broadcast the transaction with its current fee rate and hope
						// it confirms. This is essentially the same behavior as a commitment
						// transaction without anchor outputs.
						None => Some((None, 0, OnchainClaim::Tx(tx.clone()))),
					}
				},
				_ => {
					debug_assert!(false, "Only HolderFundingOutput inputs should be untractable and require external funding");
					None
				},
			});
		}
		None
	}
	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
	/// Together with `update_claims_view_from_matched_txn` this used to be named
	/// `block_connected`, but it is now also used for claiming an HTLC output if we receive a
	/// preimage after force-close.
	///
	/// `conf_height` represents the height at which the request was generated. This
	/// does not need to equal the current blockchain tip height, which should be provided via
	/// `cur_height`, however it must never be higher than `cur_height`.
	pub(crate) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Deref>(
		&mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
		broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
		let mut preprocessed_requests = Vec::with_capacity(requests.len());
		let mut aggregated_request = None;

		// Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
		for req in requests {
			// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
				log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
			} else {
				let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
					.find(|locked_package| locked_package.outpoints() == req.outpoints());
				if let Some(package) = timelocked_equivalent_package {
					log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
						req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
					continue;
				}

				if req.package_timelock() > cur_height + 1 {
					log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), cur_height);
					for outpoint in req.outpoints() {
						log_info!(logger, "  Outpoint {}", outpoint);
					}
					self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
					continue;
				}

				log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
				if req.timelock() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
					// Don't aggregate if the outpoint package timelock is soon or it's marked as non-aggregable
					preprocessed_requests.push(req);
				} else if aggregated_request.is_none() {
					aggregated_request = Some(req);
				} else {
					aggregated_request.as_mut().unwrap().merge_package(req);
				}
			}
		}
		if let Some(req) = aggregated_request {
			preprocessed_requests.push(req);
		}

		// Claim everything up to and including cur_height + 1 (see the test-only
		// `locktimed_packages_split_example` at the bottom of this file for the split_off
		// boundary).
		let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 2));
		for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
			log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
			preprocessed_requests.append(&mut entry);
		}
		self.locktimed_packages = remaining_locked_packages;

		// Generate claim transactions and track them to bump if necessary at
		// height timer expiration (i.e. in how many blocks we're going to take action).
		for mut req in preprocessed_requests {
			if let Some((new_timer, new_feerate, claim)) = self.generate_claim(cur_height, &req, &*fee_estimator, &*logger) {
				req.set_timer(new_timer);
				req.set_feerate(new_feerate);
				let txid = match claim {
					OnchainClaim::Tx(tx) => {
						log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
						broadcaster.broadcast_transaction(&tx);
						tx.txid()
					},
					OnchainClaim::Event(claim_event) => {
						log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints());
						let txid = match claim_event {
							ClaimEvent::BumpCommitment { ref commitment_tx, .. } => commitment_tx.txid(),
						};
						self.pending_claim_events.insert(txid, claim_event);
						txid
					},
				};
				for k in req.outpoints() {
					log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
					self.claimable_outpoints.insert(k.clone(), (txid, conf_height));
				}
				self.pending_claim_requests.insert(txid, req);
			}
		}
	}
	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
	/// Together with `update_claims_view_from_requests` this used to be named `block_connected`,
	/// but it is now also used for claiming an HTLC output if we receive a preimage after force-close.
	///
	/// `conf_height` represents the height at which the transactions in `txn_matched` were
	/// confirmed. This does not need to equal the current blockchain tip height, which should be
	/// provided via `cur_height`, however it must never be higher than `cur_height`.
	pub(crate) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Deref>(
		&mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash,
		cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
		let mut bump_candidates = HashMap::new();
		for tx in txn_matched {
			// Scan all inputs to verify whether one of the spent outpoints is of interest to us
			let mut claimed_outputs_material = Vec::new();
			for inp in &tx.input {
				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
					// If the outpoint has a claim request pending on it...
					if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
						//... we need to verify equality between transaction outpoints and claim request
						// outpoints to know if the transaction is the original claim or a bumped one issued
						// by us.
						let mut set_equality = true;
						if request.outpoints().len() != tx.input.len() {
							set_equality = false;
						} else {
							for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
								if **claim_inp != tx_inp.previous_output {
									set_equality = false;
									break;
								}
							}
						}

						macro_rules! clean_claim_request_after_safety_delay {
							() => {
								let entry = OnchainEventEntry {
									txid: tx.txid(),
									height: conf_height,
									block_hash: Some(conf_hash),
									event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
								};
								if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
									self.onchain_events_awaiting_threshold_conf.push(entry);
								}
							}
						}

						// If this is our transaction (or our counterparty spent all the outputs
						// before we could anyway, with the same input order as ours), wait for
						// ANTI_REORG_DELAY and clean the RBF tracking map.
						if set_equality {
							clean_claim_request_after_safety_delay!();
						} else { // If false, generate new claim request with the updated outpoint set
							let mut at_least_one_drop = false;
							for input in tx.input.iter() {
								if let Some(package) = request.split_package(&input.previous_output) {
									claimed_outputs_material.push(package);
									at_least_one_drop = true;
								}
							}
							// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
							if request.outpoints().is_empty() {
								clean_claim_request_after_safety_delay!();
							}
							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
							if at_least_one_drop {
								bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
							}
						}
						break; //No need to iterate further, the tx is either ours or theirs
					} else {
						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
					}
				}
			}
			for package in claimed_outputs_material.drain(..) {
				let entry = OnchainEventEntry {
					txid: tx.txid(),
					height: conf_height,
					block_hash: Some(conf_hash),
					event: OnchainEvent::ContentiousOutpoint { package },
				};
				if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
					self.onchain_events_awaiting_threshold_conf.push(entry);
				}
			}
		}

		// After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
		let onchain_events_awaiting_threshold_conf =
			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
		for entry in onchain_events_awaiting_threshold_conf {
			if entry.has_reached_confirmation_threshold(cur_height) {
				match entry.event {
					OnchainEvent::Claim { claim_request } => {
						// We may remove a whole set of claim outpoints here, as they may have
						// been aggregated in a single tx and thus claimed atomically
						if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
							for outpoint in request.outpoints() {
								log_debug!(logger, "Removing claim tracking for {} due to maturation of claim tx {}.", outpoint, claim_request);
								self.claimable_outpoints.remove(&outpoint);
							}
							self.pending_claim_events.remove(&claim_request);
						}
					},
					OnchainEvent::ContentiousOutpoint { package } => {
						log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
						log_debug!(logger, " {:?}", package.outpoints());
						self.claimable_outpoints.remove(&package.outpoints()[0]);
					}
				}
			} else {
				self.onchain_events_awaiting_threshold_conf.push(entry);
			}
		}

		// Check if any pending claim request must be rescheduled
		for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
			if let Some(h) = request.timer() {
				if cur_height >= h {
					bump_candidates.insert(*first_claim_txid, (*request).clone());
				}
			}
		}

		// Build, bump and rebroadcast tx accordingly
		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
		for (first_claim_txid, request) in bump_candidates.iter() {
			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
				match bump_claim {
					OnchainClaim::Tx(bump_tx) => {
						log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
						broadcaster.broadcast_transaction(&bump_tx);
					},
					OnchainClaim::Event(claim_event) => {
						log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
						self.pending_claim_events.insert(*first_claim_txid, claim_event);
					},
				}
				if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
					request.set_timer(new_timer);
					request.set_feerate(new_feerate);
				}
			}
		}
	}
	pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
		&mut self,
		txid: &Txid,
		broadcaster: B,
		fee_estimator: &LowerBoundedFeeEstimator<F>,
		logger: L,
	) where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		let mut height = None;
		for entry in self.onchain_events_awaiting_threshold_conf.iter() {
			if entry.txid == *txid {
				height = Some(entry.height);
				break;
			}
		}

		if let Some(height) = height {
			self.block_disconnected(height, broadcaster, fee_estimator, logger);
		}
	}
	pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L)
		where B::Target: BroadcasterInterface,
			F::Target: FeeEstimator,
			L::Target: Logger,
	{
		let mut bump_candidates = HashMap::new();
		let onchain_events_awaiting_threshold_conf =
			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
		for entry in onchain_events_awaiting_threshold_conf {
			if entry.height >= height {
				//- our claim tx on a commitment tx output
				//- resurrect outpoint back in its claimable set and regenerate tx
				match entry.event {
					OnchainEvent::ContentiousOutpoint { package } => {
						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
							if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
								request.merge_package(package);
								// Using a HashMap guarantees that if we have multiple outpoints getting
								// resurrected only one bump claim tx is going to be broadcast
								bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
							}
						}
					},
					_ => {},
				}
			} else {
				self.onchain_events_awaiting_threshold_conf.push(entry);
			}
		}
		for (_first_claim_txid_height, request) in bump_candidates.iter_mut() {
			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
				request.set_timer(new_timer);
				request.set_feerate(new_feerate);
				match bump_claim {
					OnchainClaim::Tx(bump_tx) => {
						log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
						broadcaster.broadcast_transaction(&bump_tx);
					},
					OnchainClaim::Event(claim_event) => {
						log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
						self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event);
					},
				}
			}
		}
		for (ancestor_claim_txid, request) in bump_candidates.drain() {
			self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
		}
		//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx,
		// but right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
		let mut remove_request = Vec::new();
		self.claimable_outpoints.retain(|_, ref v|
			if v.1 >= height {
				remove_request.push(v.0.clone());
				false
			} else { true });
		for req in remove_request {
			self.pending_claim_requests.remove(&req);
		}
	}
	pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool {
		self.claimable_outpoints.get(outpoint).is_some()
	}

	pub(crate) fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
		let mut txids: Vec<(Txid, Option<BlockHash>)> = self.onchain_events_awaiting_threshold_conf
			.iter()
			.map(|entry| (entry.txid, entry.block_hash))
			.collect();
		txids.sort_unstable_by_key(|(txid, _)| *txid);
		txids.dedup();
		txids
	}
	pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
		self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
		self.holder_htlc_sigs = None;
	}
	// Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
	// in some configurations, the holder commitment tx has been signed and broadcast by a
	// ChannelMonitor replica, so we handle that case here.
	fn sign_latest_holder_htlcs(&mut self) {
		if self.holder_htlc_sigs.is_none() {
			let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
			self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
		}
	}
	// Normally only the latest commitment tx and HTLCs need to be signed. However, in some
	// configurations we may have updated our holder commitment but a replica of the ChannelMonitor
	// broadcast the previous one before we sync with it. We handle that case here.
	fn sign_prev_holder_htlcs(&mut self) {
		if self.prev_holder_htlc_sigs.is_none() {
			if let Some(ref holder_commitment) = self.prev_holder_commitment {
				let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
				self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
			}
		}
	}
	fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
		let mut ret = Vec::new();
		for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
			let tx_idx = htlc.transaction_output_index.unwrap();
			if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
			ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
		}
		ret
	}
	//TODO: getting the latest holder transactions should be infallible and result in us "force-closing the channel", but we may
	// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
	// before providing an initial commitment transaction. For an outbound channel, init the ChannelMonitor at Channel::funding_signed; there is
	// nothing to monitor before.
	pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
		let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
	}
	#[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
	pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
		let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
	}
	pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let mut htlc_tx = None;
		let commitment_txid = self.holder_commitment.trust().txid();
		// Check if the HTLC spends from the current holder commitment
		if commitment_txid == outp.txid {
			self.sign_latest_holder_htlcs();
			if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
				let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
				let trusted_tx = self.holder_commitment.trust();
				let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
				htlc_tx = Some(trusted_tx
					.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
			}
		}
		// If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
		if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
			let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
			if commitment_txid == outp.txid {
				self.sign_prev_holder_htlcs();
				if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
					let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
					let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
					let trusted_tx = holder_commitment.trust();
					let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
					htlc_tx = Some(trusted_tx
						.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
				}
			}
		}
		htlc_tx
	}
	pub(crate) fn opt_anchors(&self) -> bool {
		self.channel_transaction_parameters.opt_anchors.is_some()
	}
	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
	pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let latest_had_sigs = self.holder_htlc_sigs.is_some();
		let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
		let ret = self.get_fully_signed_htlc_tx(outp, preimage);
		if !latest_had_sigs {
			self.holder_htlc_sigs = None;
		}
		if !prev_had_sigs {
			self.prev_holder_htlc_sigs = None;
		}
		ret
	}
}
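// Two test-only sketches of behavior documented above; module name and values
// are illustrative, not part of the API. The first mirrors the
// `split_off(&(cur_height + 2))` boundary used by
// `update_claims_view_from_requests` to release packages whose locktime is
// claimable at the next block; the second mirrors the sort + dedup done by
// `get_relevant_txids` on plain data.
#[cfg(test)]
mod locktimed_packages_split_example {
	use super::*;
	use bitcoin::hashes::Hash;

	#[test]
	fn split_off_releases_heights_up_to_next_block() {
		let cur_height = 100u32;
		let mut locktimed: BTreeMap<u32, &str> = BTreeMap::new();
		locktimed.insert(100, "claimable now");
		locktimed.insert(101, "claimable in the next block");
		locktimed.insert(102, "still locked");
		// Everything at height <= cur_height + 1 stays and gets claimed;
		// later heights are kept back as still-locked packages.
		let remaining = locktimed.split_off(&(cur_height + 2));
		assert!(locktimed.len() == 2);
		assert!(remaining.len() == 1);
	}

	#[test]
	fn relevant_txids_sort_and_dedup() {
		let mut txids: Vec<(Txid, Option<BlockHash>)> = vec![
			(Txid::all_zeros(), None),
			(Txid::all_zeros(), None),
		];
		txids.sort_unstable_by_key(|(txid, _)| *txid);
		txids.dedup();
		assert!(txids.len() == 1);
	}
}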