1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
//! The logic to build claims and bump in-flight transactions until confirmation.
//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all the
//! claim building, tracking, bumping and notification functions.
15 use bitcoin::blockdata::transaction::Transaction;
16 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
17 use bitcoin::blockdata::script::Script;
19 use bitcoin::hash_types::Txid;
21 use bitcoin::secp256k1::{Secp256k1, Signature};
22 use bitcoin::secp256k1;
24 use ln::msgs::DecodeError;
25 use ln::PaymentPreimage;
26 use ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
27 use chain::chaininterface::{FeeEstimator, BroadcasterInterface};
28 use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
29 use chain::keysinterface::{Sign, KeysInterface};
30 use chain::package::PackageTemplate;
31 use util::logger::Logger;
32 use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
35 use std::collections::HashMap;
38 use core::mem::replace;
40 const MAX_ALLOC_SIZE: usize = 64*1024;
42 /// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
43 /// transaction causing it.
45 /// Used to determine when the on-chain event can be considered safe from a chain reorganization.
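/// For example, with ANTI_REORG_DELAY = 6, an event observed in the block at height 100 reaches
/// its confirmation threshold at height 105, i.e. once six blocks (including the one containing
/// the event) have confirmed.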
47 struct OnchainEventEntry {
53 impl OnchainEventEntry {
54 fn confirmation_threshold(&self) -> u32 {
55 self.height + ANTI_REORG_DELAY - 1
58 fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
59 height >= self.confirmation_threshold()
/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take action on
/// them once they mature to enough confirmations (ANTI_REORG_DELAY)
/// An outpoint under claim by our own tx; once that tx gets enough confirmations, we remove the
/// outpoint from the bump-txn candidate buffer.
/// A claim tx may aggregate multiple claimable outpoints. One of those outpoints may be claimed
/// by a counterparty tx instead. In this case, we need to drop the outpoint and regenerate a new
/// claim tx. For safety, we keep tracking the outpoint so we can resurrect it back into a claim
/// tx if a reorg happens.
76 package: PackageTemplate,
80 impl Readable for Option<Vec<Option<(usize, Signature)>>> {
81 fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
82 match Readable::read(reader)? {
85 let vlen: u64 = Readable::read(reader)?;
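// Cap the pre-allocation so that a corrupt or malicious length prefix cannot make us allocate
// more than MAX_ALLOC_SIZE up front; the Vec still grows as elements are actually read.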
86 let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
88 ret.push(match Readable::read(reader)? {
90 1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
91 _ => return Err(DecodeError::InvalidValue)
96 _ => Err(DecodeError::InvalidValue),
101 impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
102 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
106 (vec.len() as u64).write(writer)?;
107 for opt in vec.iter() {
109 &Some((ref idx, ref sig)) => {
111 (*idx as u64).write(writer)?;
114 &None => 0u8.write(writer)?,
118 &None => 0u8.write(writer)?,
/// OnchainTxHandler receives claiming requests, aggregates them if doing so is sound, broadcasts
/// the resulting claim transactions and RBF-bumps them if possible.
127 pub struct OnchainTxHandler<ChannelSigner: Sign> {
128 destination_script: Script,
129 holder_commitment: HolderCommitmentTransaction,
// holder_htlc_sigs and prev_holder_htlc_sigs are in the order in which they appear in the commitment
131 // transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
132 // the set of HTLCs in the HolderCommitmentTransaction.
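// For example, an HTLC which is the 0th entry in the HolderCommitmentTransaction's HTLC set but
// sits at commitment transaction output index 2 is stored as sigs[2] = Some((0, sig)), with None
// entries for outputs which aren't HTLCs (see extract_holder_sigs below).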
133 holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
134 prev_holder_commitment: Option<HolderCommitmentTransaction>,
135 prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
137 pub(super) signer: ChannelSigner,
138 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
// Used to track claiming requests. If a claim tx doesn't confirm before the height timer expires we need to bump
// it (RBF or CPFP). If an input was part of an aggregate tx on the first claim attempt, we need to keep it within
// another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in flight for the
// same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
// block connection we scan all inputs and, if any of them belongs to the set of a claiming request, we test for
// set equality between the spending transaction and the claim request. If they are equal, the transaction was one
// of our claims, and after a security delay of 6 blocks we remove the pending claim request. If not, the
// transaction wasn't ours and we need to regenerate a new claim request with the reduced set of still-claimable
// outpoints.
// Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction generated
// by us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
// Value is a cache of the elements needed to generate a bumped claiming transaction (the PackageTemplate).
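// For example, if our initial claim tx T1 spends outpoints {A, B}, the request stays keyed by
// T1's txid even after we RBF-bump it with a new transaction, and the entry is only removed once
// the spend of the full outpoint set has matured by ANTI_REORG_DELAY blocks.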
151 #[cfg(test)] // Used in functional_test to verify sanitization
152 pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
154 pending_claim_requests: HashMap<Txid, PackageTemplate>,
// Used to link outpoints claimed in a connected block to a pending claim request.
// Key is an outpoint that monitor parsing has detected we have the keys/scripts to claim.
// Value is (pending claim request identifier, confirmation_block); the identifier
// is the txid of the initial claiming transaction and is immutable until the outpoint is
// post-anti-reorg-delay solved, and confirmation_block is used to erase the entry if the
// block containing the output gets disconnected.
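// For example, if outpoints A and B are first claimed together by tx T, both A and B map to
// (T's txid, h), where h is the height at which the claim request was (re-)registered.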
162 #[cfg(test)] // Used in functional_test to verify sanitization
163 pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
165 claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
167 onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
171 pub(super) secp_ctx: Secp256k1<secp256k1::All>,
174 const SERIALIZATION_VERSION: u8 = 1;
175 const MIN_SERIALIZATION_VERSION: u8 = 1;
177 impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
178 pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
179 write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
181 self.destination_script.write(writer)?;
182 self.holder_commitment.write(writer)?;
183 self.holder_htlc_sigs.write(writer)?;
184 self.prev_holder_commitment.write(writer)?;
185 self.prev_holder_htlc_sigs.write(writer)?;
187 self.channel_transaction_parameters.write(writer)?;
189 let mut key_data = VecWriter(Vec::new());
190 self.signer.write(&mut key_data)?;
191 assert!(key_data.0.len() < core::usize::MAX);
192 assert!(key_data.0.len() < core::u32::MAX as usize);
193 (key_data.0.len() as u32).write(writer)?;
194 writer.write_all(&key_data.0[..])?;
196 writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
197 for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
198 ancestor_claim_txid.write(writer)?;
199 request.write(writer)?;
202 writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
203 for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
205 claim_and_height.0.write(writer)?;
206 claim_and_height.1.write(writer)?;
209 writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
210 for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
211 entry.txid.write(writer)?;
212 writer.write_all(&byte_utils::be32_to_array(entry.height))?;
214 OnchainEvent::Claim { ref claim_request } => {
215 writer.write_all(&[0; 1])?;
216 claim_request.write(writer)?;
218 OnchainEvent::ContentiousOutpoint { ref package } => {
219 writer.write_all(&[1; 1])?;
220 package.write(writer)?;
224 self.latest_height.write(writer)?;
226 write_tlv_fields!(writer, {}, {});
231 impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
232 fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
233 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
235 let destination_script = Readable::read(reader)?;
237 let holder_commitment = Readable::read(reader)?;
238 let holder_htlc_sigs = Readable::read(reader)?;
239 let prev_holder_commitment = Readable::read(reader)?;
240 let prev_holder_htlc_sigs = Readable::read(reader)?;
242 let channel_parameters = Readable::read(reader)?;
244 let keys_len: u32 = Readable::read(reader)?;
245 let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
246 while keys_data.len() != keys_len as usize {
247 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
248 let mut data = [0; 1024];
249 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
250 reader.read_exact(read_slice)?;
251 keys_data.extend_from_slice(read_slice);
253 let signer = keys_manager.read_chan_signer(&keys_data)?;
255 let pending_claim_requests_len: u64 = Readable::read(reader)?;
256 let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
257 for _ in 0..pending_claim_requests_len {
258 pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
261 let claimable_outpoints_len: u64 = Readable::read(reader)?;
let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
263 for _ in 0..claimable_outpoints_len {
264 let outpoint = Readable::read(reader)?;
265 let ancestor_claim_txid = Readable::read(reader)?;
266 let height = Readable::read(reader)?;
267 claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
269 let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
270 let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
271 for _ in 0..waiting_threshold_conf_len {
272 let txid = Readable::read(reader)?;
273 let height = Readable::read(reader)?;
274 let event = match <u8 as Readable>::read(reader)? {
276 let claim_request = Readable::read(reader)?;
277 OnchainEvent::Claim {
282 let package = Readable::read(reader)?;
283 OnchainEvent::ContentiousOutpoint {
287 _ => return Err(DecodeError::InvalidValue),
289 onchain_events_awaiting_threshold_conf.push(OnchainEventEntry { txid, height, event });
291 let latest_height = Readable::read(reader)?;
293 read_tlv_fields!(reader, {}, {});
295 let mut secp_ctx = Secp256k1::new();
296 secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
298 Ok(OnchainTxHandler {
302 prev_holder_commitment,
303 prev_holder_htlc_sigs,
305 channel_transaction_parameters: channel_parameters,
307 pending_claim_requests,
308 onchain_events_awaiting_threshold_conf,
315 impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
316 pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
320 holder_htlc_sigs: None,
321 prev_holder_commitment: None,
322 prev_holder_htlc_sigs: None,
324 channel_transaction_parameters: channel_parameters,
325 pending_claim_requests: HashMap::new(),
326 claimable_outpoints: HashMap::new(),
327 onchain_events_awaiting_threshold_conf: Vec::new(),
/// The Lightning security model (i.e. being able to redeem/timeout an HTLC or penalize a counterparty
/// onchain) relies on the assumption that claim transactions get confirmed before their timelocks expire
/// (CSV or CLTV, depending on the case). During high-fee spikes a claim tx may get stuck in the mempool,
/// so we need to bump its feerate quickly using Replace-By-Fee or Child-Pays-For-Parent.
336 /// Panics if there are signing errors, because signing operations in reaction to on-chain events
337 /// are not expected to fail, and if they do, we may lose funds.
338 fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
339 where F::Target: FeeEstimator,
if cached_request.outpoints().len() == 0 { return None } // But don't prune the pending claim request yet, we may have to resurrect HTLCs
344 // Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
345 // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
346 let new_timer = Some(cached_request.get_height_timer(height));
347 let amt = cached_request.package_amount();
348 if cached_request.is_malleable() {
349 let predicted_weight = cached_request.package_weight(&self.destination_script);
350 if let Some((output_value, new_feerate)) = cached_request.compute_package_output(predicted_weight, amt, fee_estimator, logger) {
351 assert!(new_feerate != 0);
353 let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
354 log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
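// The finalized transaction must not be heavier than the weight we predicted when computing
// its fee, otherwise its effective feerate would end up below the feerate we just targeted.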
355 assert!(predicted_weight >= transaction.get_weight());
356 return Some((new_timer, new_feerate, transaction))
// Note: Currently, the amounts of the spent holder outputs aren't used here,
// as we can't malleate these spending packages to increase their feerate. This
// should change with the remaining anchor output patchset.
362 debug_assert!(amt == 0);
363 if let Some(transaction) = cached_request.finalize_package(self, amt, self.destination_script.clone(), logger) {
364 return Some((None, 0, transaction));
370 /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
371 /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
372 /// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
373 /// if we receive a preimage after force-close.
374 pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, latest_height: Option<u32>, broadcaster: &B, fee_estimator: &F, logger: &L)
375 where B::Target: BroadcasterInterface,
376 F::Target: FeeEstimator,
379 let height = match latest_height {
381 None => self.latest_height,
383 log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
384 let mut preprocessed_requests = Vec::with_capacity(requests.len());
385 let mut aggregated_request = None;
// Try to aggregate outputs if their timelock expiration isn't imminent (i.e. the package
// timelock is more than CLTV_SHARED_CLAIM_BUFFER blocks away) and they don't require an
// immediate nLockTime (aggregable).
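// For example, two aggregable requests whose package timelocks are both more than
// CLTV_SHARED_CLAIM_BUFFER blocks away are merged into a single PackageTemplate below and
// later claimed by one transaction.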
389 for req in requests {
// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock up a CPFP input for a while
391 if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); } else {
392 log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
393 if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
// Don't aggregate if the outpoint's package timelock expires soon or it is marked as non-aggregable
395 preprocessed_requests.push(req);
396 } else if aggregated_request.is_none() {
397 aggregated_request = Some(req);
399 aggregated_request.as_mut().unwrap().merge_package(req);
403 if let Some(req) = aggregated_request {
404 preprocessed_requests.push(req);
407 // Generate claim transactions and track them to bump if necessary at
// height timer expiration (i.e. in how many blocks we're going to take action).
409 for mut req in preprocessed_requests {
410 if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) {
411 req.set_timer(new_timer);
412 req.set_feerate(new_feerate);
413 let txid = tx.txid();
414 for k in req.outpoints() {
415 log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
416 self.claimable_outpoints.insert(k.clone(), (txid, height));
418 self.pending_claim_requests.insert(txid, req);
419 log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
420 broadcaster.broadcast_transaction(&tx);
424 let mut bump_candidates = HashMap::new();
425 for tx in txn_matched {
// Scan all inputs to check whether one of the spent outpoints is of interest to us
427 let mut claimed_outputs_material = Vec::new();
428 for inp in &tx.input {
429 if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
430 // If outpoint has claim request pending on it...
431 if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
//... we need to verify equality between the transaction's outpoints and the claim request's
// outpoints to know if the transaction is the original claim or a bumped one issued by us.
435 let mut set_equality = true;
436 if request.outpoints().len() != tx.input.len() {
437 set_equality = false;
439 for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
440 if **claim_inp != tx_inp.previous_output {
441 set_equality = false;
446 macro_rules! clean_claim_request_after_safety_delay {
448 let entry = OnchainEventEntry {
451 event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
453 if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
454 self.onchain_events_awaiting_threshold_conf.push(entry);
// If this is our transaction (or our counterparty spent all the outputs
// before we could anyway, with the same input order as us), wait for
// ANTI_REORG_DELAY and clean the RBF tracking map.
463 clean_claim_request_after_safety_delay!();
} else { // If not, generate a new claim request with the updated outpoint set
465 let mut at_least_one_drop = false;
466 for input in tx.input.iter() {
467 if let Some(package) = request.split_package(&input.previous_output) {
468 claimed_outputs_material.push(package);
469 at_least_one_drop = true;
471 // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
472 if request.outpoints().is_empty() {
473 clean_claim_request_after_safety_delay!();
476 //TODO: recompute soonest_timelock to avoid wasting a bit on fees
477 if at_least_one_drop {
478 bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
break; //No need to iterate further, the tx is either ours or theirs
483 panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
487 for package in claimed_outputs_material.drain(..) {
488 let entry = OnchainEventEntry {
491 event: OnchainEvent::ContentiousOutpoint { package },
493 if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
494 self.onchain_events_awaiting_threshold_conf.push(entry);
// After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
500 let onchain_events_awaiting_threshold_conf =
501 self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
502 for entry in onchain_events_awaiting_threshold_conf {
503 if entry.has_reached_confirmation_threshold(height) {
505 OnchainEvent::Claim { claim_request } => {
// We may remove a whole set of claim outpoints here, as these may have
// been aggregated in a single tx and claimed atomically
508 if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
509 for outpoint in request.outpoints() {
510 self.claimable_outpoints.remove(&outpoint);
514 OnchainEvent::ContentiousOutpoint { package } => {
515 self.claimable_outpoints.remove(&package.outpoints()[0]);
519 self.onchain_events_awaiting_threshold_conf.push(entry);
523 // Check if any pending claim request must be rescheduled
524 for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
525 if let Some(h) = request.timer() {
527 bump_candidates.insert(*first_claim_txid, (*request).clone());
532 // Build, bump and rebroadcast tx accordingly
533 log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
534 for (first_claim_txid, request) in bump_candidates.iter() {
535 if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
536 log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
537 broadcaster.broadcast_transaction(&bump_tx);
538 if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
539 request.set_timer(new_timer);
540 request.set_feerate(new_feerate);
546 pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
553 B::Target: BroadcasterInterface,
554 F::Target: FeeEstimator,
557 let mut height = None;
558 for entry in self.onchain_events_awaiting_threshold_conf.iter() {
559 if entry.txid == *txid {
560 height = Some(entry.height);
565 if let Some(height) = height {
566 self.block_disconnected(height, broadcaster, fee_estimator, logger);
570 pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
571 where B::Target: BroadcasterInterface,
572 F::Target: FeeEstimator,
575 let mut bump_candidates = HashMap::new();
576 let onchain_events_awaiting_threshold_conf =
577 self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
578 for entry in onchain_events_awaiting_threshold_conf {
579 if entry.height >= height {
580 //- our claim tx on a commitment tx output
//- resurrect the outpoint back into its claimable set and regenerate the tx
583 OnchainEvent::ContentiousOutpoint { package } => {
584 if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
585 if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
586 request.merge_package(package);
// Using a HashMap guarantees that if we have multiple outpoints getting
// resurrected, only one bump claim tx is going to be broadcast
589 bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
596 self.onchain_events_awaiting_threshold_conf.push(entry);
599 for (_, request) in bump_candidates.iter_mut() {
600 if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &&*fee_estimator, &&*logger) {
601 request.set_timer(new_timer);
602 request.set_feerate(new_feerate);
603 log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
604 broadcaster.broadcast_transaction(&bump_tx);
607 for (ancestor_claim_txid, request) in bump_candidates.drain() {
608 self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
// right now, if one of the outpoints gets disconnected, just erase the whole pending claim request.
612 let mut remove_request = Vec::new();
613 self.claimable_outpoints.retain(|_, ref v|
615 remove_request.push(v.0.clone());
618 for req in remove_request {
619 self.pending_claim_requests.remove(&req);
623 pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
624 let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
626 .map(|entry| entry.txid)
628 txids.sort_unstable();
633 pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
634 self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
635 self.holder_htlc_sigs = None;
638 // Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
639 // in some configurations, the holder commitment tx has been signed and broadcast by a
640 // ChannelMonitor replica, so we handle that case here.
641 fn sign_latest_holder_htlcs(&mut self) {
642 if self.holder_htlc_sigs.is_none() {
643 let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
644 self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
648 // Normally only the latest commitment tx and HTLCs need to be signed. However, in some
649 // configurations we may have updated our holder commitment but a replica of the ChannelMonitor
650 // broadcast the previous one before we sync with it. We handle that case here.
651 fn sign_prev_holder_htlcs(&mut self) {
652 if self.prev_holder_htlc_sigs.is_none() {
653 if let Some(ref holder_commitment) = self.prev_holder_commitment {
654 let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
655 self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
660 fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
661 let mut ret = Vec::new();
662 for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
663 let tx_idx = htlc.transaction_output_index.unwrap();
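// The returned Vec is indexed by commitment transaction output index; grow it with None
// placeholders for outputs which aren't HTLCs (or which we haven't reached yet).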
664 if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
665 ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
//TODO: getting the latest holder transactions should be infallible and result in us "force-closing the channel", but we may
// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
// before an initial commitment transaction is provided. For an outbound channel, the ChannelMonitor is initialized at
// Channel::funding_signed, so there is nothing to monitor before that.
674 pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
675 let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
676 self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
677 self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
680 #[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
681 pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
682 let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
683 self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
684 self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
687 pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
688 let mut htlc_tx = None;
689 let commitment_txid = self.holder_commitment.trust().txid();
690 // Check if the HTLC spends from the current holder commitment
691 if commitment_txid == outp.txid {
692 self.sign_latest_holder_htlcs();
693 if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
694 let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
695 let trusted_tx = self.holder_commitment.trust();
696 let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
697 htlc_tx = Some(trusted_tx
698 .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
701 // If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
702 if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
703 let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
704 if commitment_txid == outp.txid {
705 self.sign_prev_holder_htlcs();
706 if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
707 let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
708 let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
709 let trusted_tx = holder_commitment.trust();
710 let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
711 htlc_tx = Some(trusted_tx
712 .get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
719 #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
720 pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
721 let latest_had_sigs = self.holder_htlc_sigs.is_some();
722 let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
723 let ret = self.get_fully_signed_htlc_tx(outp, preimage);
724 if !latest_had_sigs {
725 self.holder_htlc_sigs = None;
728 self.prev_holder_htlc_sigs = None;