//! The logic to build claims and bump in-flight transactions until confirmation.
//!
//! OnchainTxHandler objects are an integral part of ChannelMonitor and encapsulate all the
//! transaction building, tracking, bumping and notification logic.
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;
use bitcoin::util::bip143;

use bitcoin_hashes::sha256d::Hash as Sha256dHash;

use secp256k1::Secp256k1;
use secp256k1;

use ln::msgs::DecodeError;
use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
use ln::channelmanager::PaymentPreimage;
use ln::chan_utils::{HTLCType, LocalCommitmentTransaction};
use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
use chain::keysinterface::ChannelKeys;
use util::logger::Logger;
use util::ser::{ReadableArgs, Readable, Writer, Writeable};
use util::byte_utils;

use std::collections::{HashMap, hash_map};
use std::sync::Arc;
use std::cmp;
use std::ops::Deref;
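// Cap on the initial capacity we reserve when deserializing the maps below, so a corrupted or
// malicious length prefix can't make us pre-allocate an arbitrarily large buffer.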
const MAX_ALLOC_SIZE: usize = 64*1024;
/// Upon discovery of some classes of onchain txn by ChannelMonitor, we may have to take actions on them
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq)]
enum OnchainEvent {
	/// Outpoint under claim process by our own tx; once this one gets enough confirmations, we remove it from
	/// the bump-txn candidate buffer.
	Claim {
		claim_request: Sha256dHash,
	},
	/// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by a remote party tx.
	/// In this case, we need to drop the outpoint and regenerate a new claim tx. For safety, we keep tracking
	/// the outpoint to be sure to resurrect it back into the claim tx if a reorg happens.
	ContentiousOutpoint {
		outpoint: BitcoinOutPoint,
		input_material: InputMaterial,
	},
}
/// Higher-level cache structure needed to re-generate bumped claim txn if needed
#[derive(Clone, PartialEq)]
pub struct ClaimTxBumpMaterial {
	// At every block tick, used to check if the pending claiming tx is taking too
	// much time for confirmation and we need to bump it.
	height_timer: Option<u32>,
	// Tracked in case of reorg to wipe out now-superfluous bump material
	feerate_previous: u64,
	// Soonest timelock among the set of outpoints claimed, used to decide claim
	// priority independently of feerate.
	soonest_timelock: u32,
	// Cache of script, pubkey, sig or key to solve claimable outputs scriptpubkey.
	per_input_material: HashMap<BitcoinOutPoint, InputMaterial>,
}
impl Writeable for ClaimTxBumpMaterial {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.height_timer.write(writer)?;
		writer.write_all(&byte_utils::be64_to_array(self.feerate_previous))?;
		writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?;
		writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?;
		for (outp, tx_material) in self.per_input_material.iter() {
			outp.write(writer)?;
			tx_material.write(writer)?;
		}
		Ok(())
	}
}
impl Readable for ClaimTxBumpMaterial {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let height_timer = Readable::read(reader)?;
		let feerate_previous = Readable::read(reader)?;
		let soonest_timelock = Readable::read(reader)?;
		let per_input_material_len: u64 = Readable::read(reader)?;
		let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..per_input_material_len {
			let outpoint = Readable::read(reader)?;
			let input_material = Readable::read(reader)?;
			per_input_material.insert(outpoint, input_material);
		}
		Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material })
	}
}

pub(super) enum InputDescriptors {
	RevokedOfferedHTLC,
	RevokedReceivedHTLC,
	OfferedHTLC,
	ReceivedHTLC,
	RevokedOutput, // either a revoked to_local output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
}
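// Fee-selection helper used by the claim-tx builders below: judging by its call sites and log
// messages, it tries a HighPriority fee (fee = feerate * predicted_weight / 1000, feerates being
// in sats per 1000 weight units), falls back to the Normal and then Background estimates when the
// claimable value can't cover the fee, and evaluates to a bool so callers can bail out when even
// the cheapest fee exceeds the claimable value. E.g. a HighPriority estimate of 5_000 sats per
// 1000 WU on a predicted weight of 600 WU means a first fee attempt of 3_000 sats.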
macro_rules! subtract_high_prio_fee {
	($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
		$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority);
		let mut fee = $used_feerate * ($predicted_weight as u64) / 1000;
			$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
			fee = $used_feerate * ($predicted_weight as u64) / 1000;
				$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
				fee = $used_feerate * ($predicted_weight as u64) / 1000;
					log_error!($self, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
				log_warn!($self, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
			log_warn!($self, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts them and
/// does RBF bumping if possible.
pub struct OnchainTxHandler<ChanSigner: ChannelKeys> {
	destination_script: Script,
	local_commitment: Option<LocalCommitmentTransaction>,
	prev_local_commitment: Option<LocalCommitmentTransaction>,
	local_csv: u16,

	key_storage: ChanSigner,

	// Used to track claiming requests. If a claim tx doesn't confirm before the height timer expires, we need to bump
	// it (RBF or CPFP). If an input has been part of an aggregate tx at the first claim try, we need to keep it within
	// another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in flight for the
	// same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
	// block connection we scan all inputs and, if any of them belongs to the set of a claiming request, we test for set
	// equality between the spending transaction and the claim request. If they are equal, the transaction was one of our
	// claiming ones and, after a security delay of 6 blocks, we remove the pending claim request. If not, the transaction
	// wasn't ours and we need to regenerate a new claim request with the reduced set of still-claimable outpoints.
	// Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction generated by
	// us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
	// Entry is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,

	// Used to link outpoints claimed in a connected block to a pending claim request.
	// Key is an outpoint that monitor parsing has detected we have the keys/scripts to claim.
	// Value is (pending claim request identifier, confirmation_block); the identifier
	// is the txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved; confirmation_block is used to erase the entry if the
	// block with the output gets disconnected.
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,

	onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,

	secp_ctx: Secp256k1<secp256k1::All>,
	logger: Arc<Logger>,
}
impl<ChanSigner: ChannelKeys + Writeable> OnchainTxHandler<ChanSigner> {
	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.destination_script.write(writer)?;
		self.local_commitment.write(writer)?;
		self.prev_local_commitment.write(writer)?;

		self.local_csv.write(writer)?;

		self.key_storage.write(writer)?;

		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
		for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
			ancestor_claim_txid.write(writer)?;
			claim_tx_data.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
			outp.write(writer)?;
			claim_and_height.0.write(writer)?;
			claim_and_height.1.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
		for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
			writer.write_all(&byte_utils::be32_to_array(**target))?;
			writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
			for ev in events.iter() {
				match ev {
					OnchainEvent::Claim { ref claim_request } => {
						writer.write_all(&[0; 1])?;
						claim_request.write(writer)?;
					},
					OnchainEvent::ContentiousOutpoint { ref outpoint, ref input_material } => {
						writer.write_all(&[1; 1])?;
						outpoint.write(writer)?;
						input_material.write(writer)?;
					}
				}
			}
		}
		Ok(())
	}
}
impl<ChanSigner: ChannelKeys + Readable> ReadableArgs<Arc<Logger>> for OnchainTxHandler<ChanSigner> {
	fn read<R: ::std::io::Read>(reader: &mut R, logger: Arc<Logger>) -> Result<Self, DecodeError> {
		let destination_script = Readable::read(reader)?;

		let local_commitment = Readable::read(reader)?;
		let prev_local_commitment = Readable::read(reader)?;

		let local_csv = Readable::read(reader)?;

		let key_storage = Readable::read(reader)?;

		let pending_claim_requests_len: u64 = Readable::read(reader)?;
		let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..pending_claim_requests_len {
			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
		}

		let claimable_outpoints_len: u64 = Readable::read(reader)?;
		let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..claimable_outpoints_len {
			let outpoint = Readable::read(reader)?;
			let ancestor_claim_txid = Readable::read(reader)?;
			let height = Readable::read(reader)?;
			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
		}

		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
		let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..waiting_threshold_conf_len {
			let height_target = Readable::read(reader)?;
			let events_len: u64 = Readable::read(reader)?;
			let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
			for _ in 0..events_len {
				let ev = match <u8 as Readable>::read(reader)? {
					0 => {
						let claim_request = Readable::read(reader)?;
						OnchainEvent::Claim {
							claim_request
						}
					},
					1 => {
						let outpoint = Readable::read(reader)?;
						let input_material = Readable::read(reader)?;
						OnchainEvent::ContentiousOutpoint {
							outpoint,
							input_material
						}
					}
					_ => return Err(DecodeError::InvalidValue),
				};
				events.push(ev);
			}
			onchain_events_waiting_threshold_conf.insert(height_target, events);
		}

		Ok(OnchainTxHandler {
			destination_script,
			local_commitment,
			prev_local_commitment,
			local_csv,
			key_storage,
			pending_claim_requests,
			claimable_outpoints,
			onchain_events_waiting_threshold_conf,
			secp_ctx: Secp256k1::new(),
			logger,
		})
	}
}
impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
	pub(super) fn new(destination_script: Script, keys: ChanSigner, local_csv: u16, logger: Arc<Logger>) -> Self {
		let key_storage = keys;

		OnchainTxHandler {
			destination_script,
			local_commitment: None,
			prev_local_commitment: None,
			local_csv,
			key_storage,
			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),
			onchain_events_waiting_threshold_conf: HashMap::new(),
			secp_ctx: Secp256k1::new(),
			logger,
		}
	}
	pub(super) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
		let mut tx_weight = 2; // count segwit marker and flag bytes
		for inp in inputs {
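			// Rough provenance of the constants below (assumed, for readability): 73 is a worst-case
			// DER-encoded ECDSA signature (72 bytes) plus a sighash-flag byte, 33 a compressed public
			// key, 32 a payment preimage, and 77/133/139 the expected witness-script sizes for the
			// to_local, offered-HTLC and accepted-HTLC scripts (cf. HTLCType::scriptlen_to_htlctype).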
			// We use expected weight (and not actual) as signatures and time lock delays may vary
			tx_weight += match inp {
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedOfferedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 133
				},
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedReceivedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 139
				},
				// number_of_witness_elements + sig_length + remotehtlc_sig + preimage_length + preimage + witness_script_length + witness_script
				&InputDescriptors::OfferedHTLC => {
					1 + 1 + 73 + 1 + 32 + 1 + 133
				},
				// number_of_witness_elements + sig_length + remotehtlc_sig + empty_vec_length + empty_vec + witness_script_length + witness_script
				&InputDescriptors::ReceivedHTLC => {
					1 + 1 + 73 + 1 + 1 + 1 + 139
				},
				// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
				&InputDescriptors::RevokedOutput => {
					1 + 1 + 73 + 1 + 1 + 1 + 77
				},
			};
		}
		tx_weight
	}
	/// In LN, claimed outputs are time-sensitive, which means we have to spend them before some timelock expires. Upon in-channel
	/// output detection, we generate a first version of a claim tx and associate a height timer with it. A height timer is an absolute block
	/// height at which, once reached, we should generate a new bumped "version" of the claim tx to be sure we safely claim outputs before
	/// our counterparty can do it too. If a timelock expires soon, the height timer is scaled down accordingly to increase the
	/// frequency of bumps and so increase our chances of success.
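	///
	/// For example, per the thresholds below, at height 100 an output expiring at height 102 gets a
	/// timer of 101 (re-bump every block), while one expiring at height 112 gets a timer of 103.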
	fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
		if timelock_expiration <= current_height + 3 {
			return current_height + 1
		} else if timelock_expiration - current_height <= 15 {
			return current_height + 3
	/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies on the assumption that claim transactions get confirmed before timelock expiration
	/// (CSV or CLTV depending on the case). In case of a high-fee spike, a claim tx may get stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pays-For-Parent.
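	/// Returns None if no claim tx could be built, otherwise the new height timer (if the tx can be
	/// bumped at all), the feerate it pays, and the claim transaction itself.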
	fn generate_claim_tx<F: Deref>(&mut self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: F) -> Option<(Option<u32>, u64, Transaction)>
		where F::Target: FeeEstimator
	{
		if cached_claim_datas.per_input_material.len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
		let mut inputs = Vec::new();
		for outp in cached_claim_datas.per_input_material.keys() {
			log_trace!(self, "Outpoint {}:{}", outp.txid, outp.vout);
			inputs.push(TxIn {
				previous_output: *outp,
				script_sig: Script::new(),
				sequence: 0xfffffffd,
				witness: Vec::new(),
			});
		}
		let mut bumped_tx = Transaction {
			version: 2,
			lock_time: 0,
			input: inputs,
			output: vec![TxOut { script_pubkey: self.destination_script.clone(), value: 0 }],
		};
		macro_rules! RBF_bump {
			($amount: expr, $old_feerate: expr, $fee_estimator: expr, $predicted_weight: expr) => {
				{
					let mut used_feerate;
					// If the old feerate is below the current estimate returned by the fee estimator, use the estimate to compute the new fee...
					let new_fee = if $old_feerate < $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) {
						let mut value = $amount;
						if subtract_high_prio_fee!(self, $fee_estimator, value, $predicted_weight, used_feerate) {
							// Overflow check is done in subtract_high_prio_fee
							$amount - value
						} else {
							log_trace!(self, "Can't new-estimation bump new claiming tx, amount {} is too small", $amount);
							return None;
						}
					// ...else just increase the previous feerate by 25% (because that's a nice number)
					} else {
						let fee = $old_feerate * $predicted_weight / 750;
						if $amount <= fee {
							log_trace!(self, "Can't 25% bump new claiming tx, amount {} is too small", $amount);
							return None;
						}
						fee
					};

					let previous_fee = $old_feerate * $predicted_weight / 1000;
					let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * $predicted_weight / 1000;
					// BIP 125 Opt-in Full Replace-by-Fee Signaling
					// * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
					// * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
					let new_fee = if new_fee < previous_fee + min_relay_fee {
						new_fee + previous_fee + min_relay_fee - new_fee
					} else {
						new_fee
					};
					Some((new_fee, new_fee * 1000 / $predicted_weight))
				}
			}
		}
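		// Illustrative example of the BIP 125 floor above: if the previous claim paid
		// previous_fee = 1_000 sats and min_relay_fee = 250 sats, a replacement whose newly estimated
		// fee is only 1_100 sats gets raised to 1_250 sats, i.e. the original absolute fee (rule 3)
		// plus its own relay bandwidth (rule 4).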
		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
		let new_timer = Some(Self::get_height_timer(height, cached_claim_datas.soonest_timelock));
		let mut inputs_witnesses_weight = 0;
		let mut amt = 0;
		let mut dynamic_fee = true;
		for per_outp_material in cached_claim_datas.per_input_material.values() {
			match per_outp_material {
				&InputMaterial::Revoked { ref witness_script, ref is_htlc, ref amount, .. } => {
					inputs_witnesses_weight += Self::get_witnesses_weight(if !is_htlc { &[InputDescriptors::RevokedOutput] } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::OfferedHTLC) { &[InputDescriptors::RevokedOfferedHTLC] } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::AcceptedHTLC) { &[InputDescriptors::RevokedReceivedHTLC] } else { unreachable!() });
					amt += *amount;
				},
				&InputMaterial::RemoteHTLC { ref preimage, ref amount, .. } => {
					inputs_witnesses_weight += Self::get_witnesses_weight(if preimage.is_some() { &[InputDescriptors::OfferedHTLC] } else { &[InputDescriptors::ReceivedHTLC] });
					amt += *amount;
				},
				&InputMaterial::LocalHTLC { .. } => {
					dynamic_fee = false;
				},
				&InputMaterial::Funding { .. } => {
					dynamic_fee = false;
				}
			}
		}
		if dynamic_fee {
			let predicted_weight = bumped_tx.get_weight() + inputs_witnesses_weight;
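			// Note: predicted_weight is the unsigned tx weight plus the estimated witness weights from
			// get_witnesses_weight; the assert further down checks it is an upper bound on the final
			// signed weight, so the feerate we compute against it should never overstate the real one.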
			let mut new_feerate;
			// If the old feerate is 0, this is the first iteration of this claim; use the normal fee calculation
			if cached_claim_datas.feerate_previous != 0 {
				if let Some((new_fee, feerate)) = RBF_bump!(amt, cached_claim_datas.feerate_previous, fee_estimator, predicted_weight as u64) {
					// If the new computed fee is greater than the whole claimable amount, burn it all in fees
					if new_fee > amt {
						bumped_tx.output[0].value = 0;
					} else {
						bumped_tx.output[0].value = amt - new_fee;
					}
					new_feerate = feerate;
				} else { return None; }
			} else {
				if subtract_high_prio_fee!(self, fee_estimator, amt, predicted_weight, new_feerate) {
					bumped_tx.output[0].value = amt;
				} else { return None; }
			}
			assert!(new_feerate != 0);
			for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
				match per_outp_material {
					&InputMaterial::Revoked { ref witness_script, ref pubkey, ref key, ref is_htlc, ref amount } => {
						let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
						let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &witness_script, *amount)[..]);
						let sig = self.secp_ctx.sign(&sighash, &key);
						bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
						bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
						if *is_htlc {
							bumped_tx.input[i].witness.push(pubkey.unwrap().clone().serialize().to_vec());
						} else {
							bumped_tx.input[i].witness.push(vec!(1));
						}
						bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
						log_trace!(self, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if !is_htlc { "to_local" } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::OfferedHTLC) { "offered" } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::AcceptedHTLC) { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
					},
					&InputMaterial::RemoteHTLC { ref witness_script, ref key, ref preimage, ref amount, ref locktime } => {
						if !preimage.is_some() { bumped_tx.lock_time = *locktime }; // Right now we don't aggregate time-locked transactions; if we do, we should set lock_time before to avoid breaking hash computation
						let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
						let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &witness_script, *amount)[..]);
						let sig = self.secp_ctx.sign(&sighash, &key);
						bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
						bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
						if let &Some(preimage) = preimage {
							bumped_tx.input[i].witness.push(preimage.clone().0.to_vec());
						} else {
							bumped_tx.input[i].witness.push(vec![]);
						}
						bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
						log_trace!(self, "Going to broadcast Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
					},
					_ => unreachable!()
				}
			}
			log_trace!(self, "...with timer {}", new_timer.unwrap());
			assert!(predicted_weight >= bumped_tx.get_weight());
			return Some((new_timer, new_feerate, bumped_tx))
		}
		for (_, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
			match per_outp_material {
				&InputMaterial::LocalHTLC { ref preimage, ref amount } => {
					let mut htlc_tx = None;
					if let Some(ref mut local_commitment) = self.local_commitment {
						if local_commitment.txid() == outp.txid {
							self.key_storage.sign_htlc_transaction(local_commitment, outp.vout, *preimage, self.local_csv, &self.secp_ctx);
							htlc_tx = local_commitment.htlc_with_valid_witness(outp.vout).clone();
						}
					}
					if let Some(ref mut prev_local_commitment) = self.prev_local_commitment {
						if prev_local_commitment.txid() == outp.txid {
							self.key_storage.sign_htlc_transaction(prev_local_commitment, outp.vout, *preimage, self.local_csv, &self.secp_ctx);
							htlc_tx = prev_local_commitment.htlc_with_valid_witness(outp.vout).clone();
						}
					}
					if let Some(htlc_tx) = htlc_tx {
						let feerate = (amount - htlc_tx.output[0].value) * 1000 / htlc_tx.get_weight() as u64;
						// Timer set to $NEVER given we can't bump tx without anchor outputs
						log_trace!(self, "Going to broadcast Local HTLC-{} claiming HTLC output {} from {}...", if preimage.is_some() { "Success" } else { "Timeout" }, outp.vout, outp.txid);
						return Some((None, feerate, htlc_tx));
					}
				},
				&InputMaterial::Funding { ref channel_value } => {
					let signed_tx = self.get_fully_signed_local_tx().unwrap();
					let mut amt_outputs = 0;
					for outp in signed_tx.output.iter() {
						amt_outputs += outp.value;
					}
					let feerate = (channel_value - amt_outputs) * 1000 / signed_tx.get_weight() as u64;
					// Timer set to $NEVER given we can't bump tx without anchor outputs
					log_trace!(self, "Going to broadcast Local Transaction {} claiming funding output {} from {}...", signed_tx.txid(), outp.vout, outp.txid);
					return Some((None, feerate, signed_tx));
				},
				_ => unreachable!()
			}
		}
		None
	}
	pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F)
		where B::Target: BroadcasterInterface,
		      F::Target: FeeEstimator
	{
		log_trace!(self, "Block at height {} connected with {} claim requests", height, claimable_outpoints.len());
		let mut new_claims = Vec::new();
		let mut aggregated_claim = HashMap::new();
		let mut aggregated_soonest = ::std::u32::MAX;
		// Try to aggregate outputs if their timelock expiration isn't imminent (absolute_timelock
		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
		for req in claimable_outpoints {
			// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
			if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
				log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
				if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if the outpoint's absolute timelock is soon or it is marked as non-aggregable
					let mut single_input = HashMap::new();
					single_input.insert(req.outpoint, req.witness_data);
					new_claims.push((req.absolute_timelock, single_input));
				} else {
					aggregated_claim.insert(req.outpoint, req.witness_data);
					if req.absolute_timelock < aggregated_soonest {
						aggregated_soonest = req.absolute_timelock;
					}
				}
			}
		}
		new_claims.push((aggregated_soonest, aggregated_claim));
		// Generate claim transactions and track them to bump if necessary at
		// height timer expiration (i.e. in how many blocks we're going to take action).
		for (soonest_timelock, claim) in new_claims.drain(..) {
			let mut claim_material = ClaimTxBumpMaterial { height_timer: None, feerate_previous: 0, soonest_timelock, per_input_material: claim };
			if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
				claim_material.height_timer = new_timer;
				claim_material.feerate_previous = new_feerate;
				let txid = tx.txid();
				for k in claim_material.per_input_material.keys() {
					log_trace!(self, "Registering claiming request for {}:{}", k.txid, k.vout);
					self.claimable_outpoints.insert(k.clone(), (txid, height));
				}
				self.pending_claim_requests.insert(txid, claim_material);
				log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
				broadcaster.broadcast_transaction(&tx);
			}
		}
		let mut bump_candidates = HashMap::new();
		for tx in txn_matched {
			// Scan all inputs to check whether one of the spent outpoints is of interest to us
			let mut claimed_outputs_material = Vec::new();
			for inp in &tx.input {
				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
					// If outpoint has claim request pending on it...
					if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
						//... we need to verify equality between transaction outpoints and claim request
						// outpoints to know if transaction is the original claim or a bumped one issued
						// by us.
						let mut set_equality = true;
						if claim_material.per_input_material.len() != tx.input.len() {
							set_equality = false;
						} else {
							for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
								if *claim_inp != tx_inp.previous_output {
									set_equality = false;
									break;
								}
							}
						}

						macro_rules! clean_claim_request_after_safety_delay {
							() => {
								let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
								match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
									hash_map::Entry::Occupied(mut entry) => {
										if !entry.get().contains(&new_event) {
											entry.get_mut().push(new_event);
										}
									},
									hash_map::Entry::Vacant(entry) => {
										entry.insert(vec![new_event]);
									}
								}
							}
						}
						// If this is our transaction (or our counterparty spent all the outputs
						// before we could anyway, with the same input order as us), wait for
						// ANTI_REORG_DELAY and clean the RBF tracking map.
						if set_equality {
							clean_claim_request_after_safety_delay!();
						} else { // If false, generate new claim request with updated outpoint set
							let mut at_least_one_drop = false;
							for input in tx.input.iter() {
								if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
									claimed_outputs_material.push((input.previous_output, input_material));
									at_least_one_drop = true;
								}
								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
								if claim_material.per_input_material.is_empty() {
									clean_claim_request_after_safety_delay!();
								}
							}
							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
							if at_least_one_drop {
								bump_candidates.insert(first_claim_txid_height.0.clone(), claim_material.clone());
							}
						}
						break; //No need to iterate further, either the tx is ours or theirs
					} else {
						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
					}
				}
			}
			for (outpoint, input_material) in claimed_outputs_material.drain(..) {
				let new_event = OnchainEvent::ContentiousOutpoint { outpoint, input_material };
				match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
					hash_map::Entry::Occupied(mut entry) => {
						if !entry.get().contains(&new_event) {
							entry.get_mut().push(new_event);
						}
					},
					hash_map::Entry::Vacant(entry) => {
						entry.insert(vec![new_event]);
					}
				}
			}
		}
		// After the security delay, either our claim tx got enough confirmations or the outpoint is definitely out of reach
		if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
			for ev in events {
				match ev {
					OnchainEvent::Claim { claim_request } => {
						// We may remove a whole set of claim outpoints here, as these ones may have
						// been aggregated in a single tx and thus claimed atomically
						if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
							for outpoint in bump_material.per_input_material.keys() {
								self.claimable_outpoints.remove(&outpoint);
							}
						}
					},
					OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
						self.claimable_outpoints.remove(&outpoint);
					}
				}
			}
		}
		// Check if any pending claim request must be rescheduled
		for (first_claim_txid, ref claim_data) in self.pending_claim_requests.iter() {
			if let Some(h) = claim_data.height_timer {
				if h == height {
					bump_candidates.insert(*first_claim_txid, (*claim_data).clone());
				}
			}
		}

		// Build, bump and rebroadcast tx accordingly
		log_trace!(self, "Bumping {} candidates", bump_candidates.len());
		for (first_claim_txid, claim_material) in bump_candidates.iter() {
			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
				log_trace!(self, "Broadcast onchain {}", log_tx!(bump_tx));
				broadcaster.broadcast_transaction(&bump_tx);
				if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
					claim_material.height_timer = new_timer;
					claim_material.feerate_previous = new_feerate;
				}
			}
		}
	}
	pub(super) fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F)
		where B::Target: BroadcasterInterface,
		      F::Target: FeeEstimator
	{
		let mut bump_candidates = HashMap::new();
		if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
			//- our claim tx on a commitment tx output
			//- resurrect the outpoint back in its claimable set and regenerate the tx
			for ev in events {
				match ev {
					OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
							if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
								claim_material.per_input_material.insert(outpoint, input_material);
								// Using a HashMap guarantees us that if multiple outpoints get
								// resurrected only one bump claim tx is going to be broadcast
								bump_candidates.insert(ancestor_claimable_txid.clone(), claim_material.clone());
							}
						}
					},
					_ => {},
				}
			}
		}
		for (_, claim_material) in bump_candidates.iter_mut() {
			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
				claim_material.height_timer = new_timer;
				claim_material.feerate_previous = new_feerate;
				broadcaster.broadcast_transaction(&bump_tx);
			}
		}
		for (ancestor_claim_txid, claim_material) in bump_candidates.drain() {
			self.pending_claim_requests.insert(ancestor_claim_txid.0, claim_material);
		}
		//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
		// right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
		let mut remove_request = Vec::new();
		self.claimable_outpoints.retain(|_, ref v|
			if v.1 == height {
				remove_request.push(v.0.clone());
				false
			} else { true });
		for req in remove_request {
			self.pending_claim_requests.remove(&req);
		}
	}
	pub(super) fn provide_latest_local_tx(&mut self, tx: LocalCommitmentTransaction) -> Result<(), ()> {
		// To prevent any unsafe state discrepancy between offchain and onchain, once the local
		// commitment transaction has been signed due to an event (either block height for an
		// HTLC-timeout or channel force-closure), don't allow any further update of the local
		// commitment transaction view to avoid delivery of the revocation secret to the counterparty
		// for the aforementioned signed transaction.
		if let Some(ref local_commitment) = self.local_commitment {
			if local_commitment.has_local_sig() { return Err(()) }
		}
		self.prev_local_commitment = self.local_commitment.take();
		self.local_commitment = Some(tx);
		Ok(())
	}
	//TODO: getting the latest local transaction should be infallible and result in us "force-closing the channel", but we may
	// have an empty local commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
	// before providing an initial commitment transaction. For outbound channels, initialize the ChannelMonitor at Channel::funding_signed;
	// there is nothing to monitor before that.
	pub(super) fn get_fully_signed_local_tx(&mut self) -> Option<Transaction> {
		if let Some(ref mut local_commitment) = self.local_commitment {
			self.key_storage.sign_local_commitment(local_commitment, &self.secp_ctx);
			return Some(local_commitment.with_valid_witness().clone());
		}
		None
	}
	pub(super) fn get_fully_signed_copy_local_tx(&mut self) -> Option<Transaction> {
		if let Some(ref mut local_commitment) = self.local_commitment {
			let mut local_commitment = local_commitment.clone();
			self.key_storage.unsafe_sign_local_commitment(&mut local_commitment, &self.secp_ctx);
			return Some(local_commitment.with_valid_witness().clone());
		}
		None
	}
	pub(super) fn get_fully_signed_htlc_tx(&mut self, txid: Sha256dHash, htlc_index: u32, preimage: Option<PaymentPreimage>) -> Option<Transaction> {
		//TODO: store preimage in OnchainTxHandler
		if let Some(ref mut local_commitment) = self.local_commitment {
			if local_commitment.txid() == txid {
				self.key_storage.sign_htlc_transaction(local_commitment, htlc_index, preimage, self.local_csv, &self.secp_ctx);
				return local_commitment.htlc_with_valid_witness(htlc_index).clone();