+ let (sig, revocation_key) = match self.key_storage {
+ Storage::Local { ref revocation_base_key, .. } => {
+ let sighash = hash_to_message!(&sighash_parts.sighash_all(&spend_tx.input[0], &redeemscript, amount)[..]);
+ let revocation_key = ignore_error!(chan_utils::derive_private_revocation_key(&self.secp_ctx, &per_commitment_key, &revocation_base_key));
+ (self.secp_ctx.sign(&sighash, &revocation_key), revocation_key)
+ }
+ Storage::Watchtower { .. } => {
+ unimplemented!();
+ }
+ };
+ spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
+ spend_tx.input[0].witness[0].push(SigHashType::All as u8);
+ spend_tx.input[0].witness.push(vec!(1));
+ spend_tx.input[0].witness.push(redeemscript.clone().into_bytes());
+
+ assert!(predicted_weight >= spend_tx.get_weight());
+ let outpoint = BitcoinOutPoint { txid: spend_tx.txid(), vout: 0 };
+ let output = spend_tx.output[0].clone();
+ let height_timer = Self::get_height_timer(height, self.their_to_self_delay.unwrap() as u32); // We can safely unwrap given we are past channel opening
+ match self.our_claim_txn_waiting_first_conf.entry(spend_tx.input[0].previous_output.clone()) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::Revoked { script: redeemscript, pubkey: None, key: revocation_key, is_htlc: false, amount: tx.output[0].value }, used_feerate, height + self.our_to_self_delay as u32, height)); }
+ }
+ (Some(spend_tx), Some(SpendableOutputDescriptor::StaticOutput { outpoint, output }))
+ } else { (None, None) }
+ }
+
+ /// Builds everything we can claim once one of *our own* commitment transactions
+ /// (`local_tx`) appears on-chain: HTLC-Timeout txn for offered HTLCs, HTLC-Success txn for
+ /// received HTLCs whose preimage we know, and DynamicOutputP2WSH descriptors for the
+ /// to_self-delayed outputs.
+ /// `per_commitment_point`/`delayed_payment_base_key` may be None (Watchtower storage), in
+ /// which case no dynamic-output descriptors can be derived.
+ /// Returns (txn to broadcast, spendable outputs, non-dust HTLC outputs to watch, pending
+ /// claims as (claimed outpoint, (height_timer, claim material, feerate, expiry, height))).
+ fn broadcast_by_local_state(&self, local_tx: &LocalSignedTx, per_commitment_point: &Option<PublicKey>, delayed_payment_base_key: &Option<SecretKey>, height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, Vec<TxOut>, Vec<(BitcoinOutPoint, (u32, TxMaterial, u64, u32, u32))>) {
+ let mut res = Vec::with_capacity(local_tx.htlc_outputs.len());
+ let mut spendable_outputs = Vec::with_capacity(local_tx.htlc_outputs.len());
+ let mut watch_outputs = Vec::with_capacity(local_tx.htlc_outputs.len());
+ let mut pending_claims = Vec::with_capacity(local_tx.htlc_outputs.len());
+
+ // Registers $father_tx's output $vout as a delayed spendable output — only possible when
+ // we hold both the per-commitment point and our delayed-payment base key, from which the
+ // private delayed key is derived.
+ macro_rules! add_dynamic_output {
+ ($father_tx: expr, $vout: expr) => {
+ if let Some(ref per_commitment_point) = *per_commitment_point {
+ if let Some(ref delayed_payment_base_key) = *delayed_payment_base_key {
+ if let Ok(local_delayedkey) = chan_utils::derive_private_key(&self.secp_ctx, per_commitment_point, delayed_payment_base_key) {
+ spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WSH {
+ outpoint: BitcoinOutPoint { txid: $father_tx.txid(), vout: $vout },
+ key: local_delayedkey,
+ // NOTE(review): this witness_script uses self.our_to_self_delay, but the
+ // commitment output matched below is built with self.their_to_self_delay —
+ // confirm both fields carry the same delay for our local txn, otherwise this
+ // descriptor's script won't match the output actually being spent.
+ witness_script: chan_utils::get_revokeable_redeemscript(&local_tx.revocation_key, self.our_to_self_delay, &local_tx.delayed_payment_key),
+ to_self_delay: self.our_to_self_delay,
+ output: $father_tx.output[$vout as usize].clone(),
+ });
+ }
+ }
+ }
+ }
+ }
+
+
+ // Locate our to_local output (revocation key + to_self delay encumbered) on the
+ // commitment tx and record it as spendable once the delay expires.
+ let redeemscript = chan_utils::get_revokeable_redeemscript(&local_tx.revocation_key, self.their_to_self_delay.unwrap(), &local_tx.delayed_payment_key);
+ let revokeable_p2wsh = redeemscript.to_v0_p2wsh();
+ for (idx, output) in local_tx.tx.output.iter().enumerate() {
+ if output.script_pubkey == revokeable_p2wsh {
+ add_dynamic_output!(local_tx.tx, idx as u32);
+ break;
+ }
+ }
+
+ // For each non-dust HTLC (those with a transaction_output_index) assemble the pre-signed
+ // second-stage tx: HTLC-Timeout for offered HTLCs, HTLC-Success (needs the payment
+ // preimage) for received ones. Witness layout: multisig dummy, their sig, our sig,
+ // preimage-or-empty, then the HTLC redeemscript.
+ for &(ref htlc, ref sigs, _) in local_tx.htlc_outputs.iter() {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ if let &Some((ref their_sig, ref our_sig)) = sigs {
+ if htlc.offered {
+ log_trace!(self, "Broadcasting HTLC-Timeout transaction against local commitment transactions");
+ let mut htlc_timeout_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
+
+ htlc_timeout_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
+
+ htlc_timeout_tx.input[0].witness.push(their_sig.serialize_der().to_vec());
+ htlc_timeout_tx.input[0].witness[1].push(SigHashType::All as u8);
+ htlc_timeout_tx.input[0].witness.push(our_sig.serialize_der().to_vec());
+ htlc_timeout_tx.input[0].witness[2].push(SigHashType::All as u8);
+
+ htlc_timeout_tx.input[0].witness.push(Vec::new()); // Empty element where the success path carries the preimage
+ let htlc_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &local_tx.a_htlc_key, &local_tx.b_htlc_key, &local_tx.revocation_key);
+ htlc_timeout_tx.input[0].witness.push(htlc_script.clone().into_bytes());
+
+ add_dynamic_output!(htlc_timeout_tx, 0);
+ // Bump timer keyed on how close the HTLC's absolute expiry is.
+ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
+ pending_claims.push((htlc_timeout_tx.input[0].previous_output.clone(), (height_timer, TxMaterial::LocalHTLC { script: htlc_script, sigs: (*their_sig, *our_sig), preimage: None, amount: htlc.amount_msat / 1000}, 0, htlc.cltv_expiry, height)));
+ res.push(htlc_timeout_tx);
+ } else {
+ // Received HTLC: we can only claim it if we learned the preimage.
+ if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) {
+ log_trace!(self, "Broadcasting HTLC-Success transaction against local commitment transactions");
+ let mut htlc_success_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
+
+ htlc_success_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
+
+ htlc_success_tx.input[0].witness.push(their_sig.serialize_der().to_vec());
+ htlc_success_tx.input[0].witness[1].push(SigHashType::All as u8);
+ htlc_success_tx.input[0].witness.push(our_sig.serialize_der().to_vec());
+ htlc_success_tx.input[0].witness[2].push(SigHashType::All as u8);
+
+ htlc_success_tx.input[0].witness.push(payment_preimage.0.to_vec());
+ let htlc_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &local_tx.a_htlc_key, &local_tx.b_htlc_key, &local_tx.revocation_key);
+ htlc_success_tx.input[0].witness.push(htlc_script.clone().into_bytes());
+
+ add_dynamic_output!(htlc_success_tx, 0);
+ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
+ pending_claims.push((htlc_success_tx.input[0].previous_output.clone(), (height_timer, TxMaterial::LocalHTLC { script: htlc_script, sigs: (*their_sig, *our_sig), preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000}, 0, htlc.cltv_expiry, height)));
+ res.push(htlc_success_tx);
+ }
+ }
+ // Watch the commitment's HTLC output itself so we notice third-party spends.
+ watch_outputs.push(local_tx.tx.output[transaction_output_index as usize].clone());
+ } else { panic!("Should have sigs for non-dust local tx outputs!") }
+ }
+ }
+
+ (res, spendable_outputs, watch_outputs, pending_claims)
+ }
+
+ /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
+ /// revoked using data in local_claimable_outpoints.
+ /// Should not be used if check_spend_revoked_transaction succeeds.
+ /// Returns (claim txn to broadcast, spendable outputs, (commitment txid, HTLC outputs to watch)).
+ fn check_spend_local_transaction(&mut self, tx: &Transaction, height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, (Sha256dHash, Vec<TxOut>)) {
+ let commitment_txid = tx.txid();
+ let mut local_txn = Vec::new();
+ let mut spendable_outputs = Vec::new();
+ let mut watch_outputs = Vec::new();
+
+ // Queues a backward HTLC failure to fire once the broadcast tx reaches ANTI_REORG_DELAY
+ // confirmations, replacing any pending update for the same HTLC source.
+ macro_rules! wait_threshold_conf {
+ ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
+ log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+ match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::HTLCUpdate { ref htlc_update } => {
+ return htlc_update.0 != $source
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)}]);
+ }
+ }
+ }
+ }
+
+ // Merges broadcast_by_local_state's result into our accumulators, registering each new
+ // claim unless we already track one for that outpoint (first writer wins).
+ macro_rules! append_onchain_update {
+ ($updates: expr) => {
+ local_txn.append(&mut $updates.0);
+ spendable_outputs.append(&mut $updates.1);
+ watch_outputs.append(&mut $updates.2);
+ for claim in $updates.3 {
+ match self.our_claim_txn_waiting_first_conf.entry(claim.0) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert(claim.1); }
+ }
+ }
+ }
+ }
+
+ // HTLCs set may differ between last and previous local commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward
+ let mut is_local_tx = false;
+
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ if local_tx.txid == commitment_txid {
+ is_local_tx = true;
+ log_trace!(self, "Got latest local commitment tx broadcast, searching for available HTLCs to claim");
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key), height));
+ },
+ Storage::Watchtower { .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, &None, &None, height));
+ }
+ }
+ }
+ }
+ if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+ if local_tx.txid == commitment_txid {
+ is_local_tx = true;
+ log_trace!(self, "Got previous local commitment tx broadcast, searching for available HTLCs to claim");
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref prev_latest_per_commitment_point, .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key), height));
+ },
+ Storage::Watchtower { .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, &None, &None, height));
+ }
+ }
+ }
+ }
+
+ // Dust HTLCs have no on-chain output to claim; fail them backward once the commitment
+ // broadcast has enough confirmations.
+ macro_rules! fail_dust_htlcs_after_threshold_conf {
+ ($local_tx: expr) => {
+ for &(ref htlc, _, ref source) in &$local_tx.htlc_outputs {
+ if htlc.transaction_output_index.is_none() {
+ if let &Some(ref source) = source {
+ wait_threshold_conf!(height, source.clone(), "latest", htlc.payment_hash.clone());
+ }
+ }
+ }
+ }
+ }
+
+ if is_local_tx {
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ fail_dust_htlcs_after_threshold_conf!(local_tx);
+ }
+ if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+ fail_dust_htlcs_after_threshold_conf!(local_tx);
+ }
+ }
+
+ (local_txn, spendable_outputs, (commitment_txid, watch_outputs))
+ }
+
+ /// Generate a spendable output event when closing_transaction get registered onchain.
+ fn check_spend_closing_transaction(&self, tx: &Transaction) -> Option<SpendableOutputDescriptor> {
+ // Heuristic for a cooperative close: final sequence, a witness, and a 71-byte final
+ // witness element (a DER signature + sighash byte). Bail out early otherwise.
+ let spending_input = &tx.input[0];
+ if spending_input.sequence != 0xFFFFFFFF || spending_input.witness.is_empty() || spending_input.witness.last().unwrap().len() != 71 {
+ return None;
+ }
+ match self.key_storage {
+ Storage::Local { ref shutdown_pubkey, .. } => {
+ // Rebuild our P2WPKH shutdown script and look for the matching output.
+ let our_channel_close_key_hash = Hash160::hash(&shutdown_pubkey.serialize());
+ let shutdown_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_close_key_hash[..]).into_script();
+ tx.output.iter().enumerate()
+ .find(|&(_, ref output)| output.script_pubkey == shutdown_script)
+ .map(|(idx, output)| SpendableOutputDescriptor::StaticOutput {
+ outpoint: BitcoinOutPoint { txid: tx.txid(), vout: idx as u32 },
+ output: output.clone(),
+ })
+ }
+ Storage::Watchtower { .. } => {
+ //TODO: we need to ensure an offline client will generate the event when it
+ // comes back online after only the watchtower saw the transaction
+ None
+ }
+ }
+ }
+
+ /// Used by ChannelManager deserialization to broadcast the latest local state if its copy of
+ /// the Channel was out-of-date.
+ pub(super) fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ let mut res = vec![local_tx.tx.clone()];
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
+ // current_local_signed_commitment_tx corresponds to the *latest* per-commitment
+ // point (check_spend_local_transaction pairs it with latest_per_commitment_point);
+ // previously this used prev_latest_per_commitment_point, which derives the wrong
+ // delayed key and yields unusable spendable-output descriptors for this tx.
+ res.append(&mut self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key), 0).0);
+ // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
+ // The data will be re-generated and tracked in check_spend_local_transaction if we get a confirmation.
+ },
+ _ => panic!("Can only broadcast by local channelmonitor"),
+ };
+ res
+ } else {
+ Vec::new()
+ }
+ }
+
+ /// Processes a newly-connected block: inspects each matched transaction for spends of the
+ /// funding output (a commitment broadcast), spends of HTLC outputs on remote commitment txn
+ /// already seen on-chain, and cooperative-close outputs, broadcasting whatever claim txn we
+ /// can build. Also goes on-chain with our latest local commitment tx if
+ /// would_broadcast_at_height says an HTLC is at risk, and fires buffered onchain events
+ /// whose ANTI_REORG_DELAY threshold lands at this height.
+ /// Returns (per-txid outputs to watch, spendable outputs, HTLC updates to pass upstream).
+ fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>, Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>) {
+ let mut watch_outputs = Vec::new();
+ let mut spendable_outputs = Vec::new();
+ let mut htlc_updated = Vec::new();
+ for tx in txn_matched {
+ if tx.input.len() == 1 {
+ // Assuming our keys were not leaked (in which case we're screwed no matter what),
+ // commitment transactions and HTLC transactions will all only ever have one input,
+ // which is an easy way to filter out any potential non-matching txn for lazy
+ // filters.
+ let prevout = &tx.input[0].previous_output;
+ let mut txn: Vec<Transaction> = Vec::new();
+ let funding_txo = match self.key_storage {
+ Storage::Local { ref funding_info, .. } => {
+ funding_info.clone()
+ }
+ Storage::Watchtower { .. } => {
+ unimplemented!();
+ }
+ };
+ // A spend of the funding output is a commitment broadcast: try revoked/remote
+ // handling first, then our own commitments, then cooperative close.
+ if funding_txo.is_none() || (prevout.txid == funding_txo.as_ref().unwrap().0.txid && prevout.vout == funding_txo.as_ref().unwrap().0.index as u32) {
+ let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(tx, height, fee_estimator);
+ txn = remote_txn;
+ spendable_outputs.append(&mut spendable_output);
+ if !new_outputs.1.is_empty() {
+ watch_outputs.push(new_outputs);
+ }
+ if txn.is_empty() {
+ // Nothing claimable as a remote tx; check whether it is one of ours.
+ let (local_txn, mut spendable_output, new_outputs) = self.check_spend_local_transaction(tx, height);
+ spendable_outputs.append(&mut spendable_output);
+ txn = local_txn;
+ if !new_outputs.1.is_empty() {
+ watch_outputs.push(new_outputs);
+ }
+ }
+ if !funding_txo.is_none() && txn.is_empty() {
+ // Neither commitment handler produced anything; maybe a mutual close.
+ if let Some(spendable_output) = self.check_spend_closing_transaction(tx) {
+ spendable_outputs.push(spendable_output);
+ }
+ }
+ } else {
+ // Not a funding spend: possibly a second-stage HTLC spend of a remote
+ // commitment tx we already recorded on-chain.
+ if let Some(&(commitment_number, _)) = self.remote_commitment_txn_on_chain.get(&prevout.txid) {
+ let (tx, spendable_output) = self.check_spend_remote_htlc(tx, commitment_number, height, fee_estimator);
+ if let Some(tx) = tx {
+ txn.push(tx);
+ }
+ if let Some(spendable_output) = spendable_output {
+ spendable_outputs.push(spendable_output);
+ }
+ }
+ }
+ for tx in txn.iter() {
+ broadcaster.broadcast_transaction(tx);
+ }
+ }
+ // While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
+ // can also be resolved in a few other ways which can have more than one output. Thus,
+ // we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
+ let mut updated = self.is_resolving_htlc_output(tx, height);
+ if updated.len() > 0 {
+ htlc_updated.append(&mut updated);
+ }
+ // If this tx spends an outpoint we have a pending claim for, schedule an
+ // OnchainEvent::Claim to drop the claim after ANTI_REORG_DELAY confirmations,
+ // de-duplicating any event already queued for the same outpoint.
+ for inp in &tx.input {
+ if self.our_claim_txn_waiting_first_conf.contains_key(&inp.previous_output) {
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::Claim { outpoint } => {
+ return outpoint != inp.previous_output
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::Claim { outpoint: inp.previous_output.clone()});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::Claim { outpoint: inp.previous_output.clone()}]);
+ }
+ }
+ }
+ }
+ }
+ let mut pending_claims = Vec::new();
+ // Unilateral close path: if any HTLC demands we hit the chain now, broadcast our latest
+ // local commitment tx plus every claim tx we can already build on it.
+ if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
+ if self.would_broadcast_at_height(height) {
+ broadcaster.broadcast_transaction(&cur_local_tx.tx);
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
+ let (txs, mut spendable_output, new_outputs, mut pending_txn) = self.broadcast_by_local_state(&cur_local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key), height);
+ spendable_outputs.append(&mut spendable_output);
+ pending_claims.append(&mut pending_txn);
+ if !new_outputs.is_empty() {
+ watch_outputs.push((cur_local_tx.txid.clone(), new_outputs));
+ }
+ for tx in txs {
+ broadcaster.broadcast_transaction(&tx);
+ }
+ },
+ Storage::Watchtower { .. } => {
+ let (txs, mut spendable_output, new_outputs, mut pending_txn) = self.broadcast_by_local_state(&cur_local_tx, &None, &None, height);
+ spendable_outputs.append(&mut spendable_output);
+ pending_claims.append(&mut pending_txn);
+ if !new_outputs.is_empty() {
+ watch_outputs.push((cur_local_tx.txid.clone(), new_outputs));
+ }
+ for tx in txs {
+ broadcaster.broadcast_transaction(&tx);
+ }
+ }
+ }
+ }
+ }
+ // Register the claims generated above; an existing entry for the outpoint wins.
+ for claim in pending_claims {
+ match self.our_claim_txn_waiting_first_conf.entry(claim.0) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert(claim.1); }
+ }
+ }
+ // Fire events whose confirmation threshold lands exactly on this height.
+ if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
+ for ev in events {
+ match ev {
+ OnchainEvent::Claim { outpoint } => {
+ // Claim is deeply confirmed; stop tracking it for fee-bumping.
+ self.our_claim_txn_waiting_first_conf.remove(&outpoint);
+ },
+ OnchainEvent::HTLCUpdate { htlc_update } => {
+ log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
+ htlc_updated.push((htlc_update.0, None, htlc_update.1));
+ },
+ }
+ }
+ }
+ //TODO: iter on buffered TxMaterial in our_claim_txn_waiting_first_conf, if block timer is expired generate a bumped claim tx (RBF or CPFP accordingly)
+ self.last_block_hash = block_hash.clone();
+ (watch_outputs, spendable_outputs, htlc_updated)
+ }
+
+ /// Handles a block disconnection (reorg): drops onchain events that were scheduled against
+ /// the now-disconnected confirmation height and forgets claim txn recorded at this height.
+ fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash) {
+ if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
+ //We may discard:
+ //- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
+ //- our claim tx on a commitment tx output
+ }
+ // Keep every claim whose v.3 differs from the disconnected height (idiomatic form of
+ // `if v.3 == height { false } else { true }`).
+ // NOTE(review): in broadcast_by_local_state's tuple, .3 is the claim's expiry/timelock
+ // height and .4 the generation height — confirm .3 is really the intended key here.
+ self.our_claim_txn_waiting_first_conf.retain(|_, ref mut v| v.3 != height);
+ self.last_block_hash = block_hash.clone();
+ }
+
+ pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool {
+ // We need to consider all HTLCs which are:
+ // * in any unrevoked remote commitment transaction, as they could broadcast said
+ // transactions and we'd end up in a race, or
+ // * are in our latest local commitment transaction, as this is the thing we will
+ // broadcast if we go on-chain.
+ // Note that we consider HTLCs which were below dust threshold here - while they don't
+ // strictly imply that we need to fail the channel, we need to go ahead and fail them back
+ // to the source, and if we don't fail the channel we will have to ensure that the next
+ // updates that peer sends us are update_fails, failing the channel if not. It's probably
+ // easier to just fail the channel as this case should be rare enough anyway.
+ macro_rules! scan_commitment {
+ ($htlcs: expr, $local_tx: expr) => {
+ for ref htlc in $htlcs {
+ // For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+ // chain with enough room to claim the HTLC without our counterparty being able to
+ // time out the HTLC first.
+ // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+ // concern is being able to claim the corresponding inbound HTLC (on another
+ // channel) before it expires. In fact, we don't even really care if our
+ // counterparty here claims such an outbound HTLC after it expired as long as we
+ // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+ // chain when our counterparty is waiting for expiration to off-chain fail an HTLC
+ // we give ourselves a few blocks of headroom after expiration before going
+ // on-chain for an expired HTLC.
+ // Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+ // from us until we've reached the point where we go on-chain with the
+ // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+ // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+ // aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
+ // inbound_cltv == height + CLTV_CLAIM_BUFFER
+ // outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+ // LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+ // CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
+ // LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+ // The final, above, condition is checked for statically in channelmanager
+ // with CHECK_CLTV_EXPIRY_SANITY_2.
+ let htlc_outbound = $local_tx == htlc.offered;
+ if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
+ (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+ log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
+ return true;
+ }
+ }