Add more comments about timelock assumptions and security model
authorAntoine Riard <ariard@student.42.fr>
Thu, 18 Jul 2019 22:50:03 +0000 (18:50 -0400)
committerAntoine Riard <ariard@student.42.fr>
Fri, 19 Jul 2019 21:19:46 +0000 (17:19 -0400)
Rename HTLC_FAIL_ANTI_REORG_DELAY to ANTI_REORG_DELAY because
we are also going to rely on it to remove bump-candidate outpoints
from the tracker once a claim gets enough depth.
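
As a hypothetical sketch of that tracking pattern (the Tracker type and
event names below are illustrative, not LDK's API; they only mirror the
onchain_events_waiting_threshold_conf entries keyed at
height + ANTI_REORG_DELAY - 1 in the diff below):

    use std::collections::HashMap;

    const ANTI_REORG_DELAY: u32 = 6;

    // Illustrative stand-in for the real OnchainEvent variants in the diff.
    enum OnchainEvent {
        MatureClaim { outpoint_index: u32 },
    }

    struct Tracker {
        // Keyed by the block height at which the queued events become reorg-safe.
        events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
    }

    impl Tracker {
        // Queue an event to fire once the triggering tx, confirmed at
        // `conf_height`, has ANTI_REORG_DELAY confirmations.
        fn queue(&mut self, conf_height: u32, ev: OnchainEvent) {
            self.events_waiting_threshold_conf
                .entry(conf_height + ANTI_REORG_DELAY - 1)
                .or_insert_with(Vec::new)
                .push(ev);
        }

        // On a new block, everything keyed at this height is now safe to act on,
        // e.g. dropping the claimed outpoint from the bump-candidates buffer.
        fn block_connected(&mut self, height: u32) -> Vec<OnchainEvent> {
            self.events_waiting_threshold_conf.remove(&height).unwrap_or_default()
        }

        // On disconnect, discard events that were counting on the disconnected block.
        fn block_disconnected(&mut self, height: u32) {
            self.events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1));
        }
    }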

Rename HTLC_FAIL_TIMEOUT_BLOCKS to LATENCY_GRACE_PERIOD_BLOCKS because
it conveys more clearly that we are doing our peer a favor rather
than ruthlessly enforcing the contract.

CLTV_EXPIRY_DELTA should be > LATENCY_GRACE_PERIOD_BLOCKS +
CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS.
When the chain reaches an HTLC's cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS
and the outbound HTLC is still unresolved, we fail on-chain with
our local commitment tx. At this point we expect to get into the chain
within a worst-case delay of CLTV_CLAIM_BUFFER blocks. Once our
HTLC-timeout is confirmed with ANTI_REORG_DELAY we may safely fail
backward the corresponding inbound HTLC.
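
As a minimal standalone sketch (not code added by this commit), the block
budget works out as follows, assuming the constant values visible in the
diff below:

    // Constant values taken from the diff below; the assert itself is illustrative.
    const CLTV_EXPIRY_DELTA: u32 = 6 * 12;          // 72 blocks
    const CLTV_CLAIM_BUFFER: u32 = 6;
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
    const ANTI_REORG_DELAY: u32 = 6;

    fn main() {
        // grace period for the downstream peer to fail back off-chain
        // + worst-case delay to confirm our HTLC-timeout on-chain
        // + reorg-safety delay on that confirmation
        // + our own grace period to see the last block and fail backward
        let budget = LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER
            + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS;
        assert!(CLTV_EXPIRY_DELTA > budget);        // 72 > 18
    }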

src/ln/channelmanager.rs
src/ln/channelmonitor.rs
src/ln/functional_tests.rs

index 201865d67386fc920b8c7c162f157eb049493779..64a9f2ebb8e575c3895d77efd3ab4eaa1b82ccf0 100644 (file)
@@ -28,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::router::Route;
 use ln::msgs;
 use ln::onion_utils;
@@ -351,20 +351,21 @@ pub struct ChannelManager {
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY_DELTA is at least 2*LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER +
+// ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS we still have CLTV_CLAIM_BUFFER blocks to time it out on-chain,
+// then ANTI_REORG_DELAY blocks for the outbound HTLC-timeout confirmation to be reorg-safe
+// before we fail the corresponding inbound HTLC backward, plus a second
+// LATENCY_GRACE_PERIOD_BLOCKS in case we are ourselves slow to see that last block.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMontior::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 macro_rules! secp_call {
        ( $res: expr, $err: expr ) => {
@@ -820,7 +821,7 @@ impl ChannelManager {
                let pending_forward_info = if next_hop_data.hmac == [0; 32] {
                                // OUR PAYMENT!
                                // final_expiry_too_soon
-                               if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+                               if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
                                        return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
                                }
                                // final_incorrect_htlc_amount
@@ -912,8 +913,8 @@ impl ChannelManager {
                                                break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
                                        }
                                        let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-                                       // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-                                       if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+                                       // We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
+                                       if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
                                                break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
index 2ddb2690ad8ed00de00090c7d3449b9397cba8f9..6e1b212ea24c051edaa9f8f9eb4dca4380b3a389 100644 (file)
@@ -303,12 +303,24 @@ const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
 pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
 /// Number of blocks by which point we expect our counterparty to have seen new blocks on the
 /// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
-/// copies of ChannelMonitors, including watchtowers).
-pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
-/// Number of blocks we wait on seeing a confirmed HTLC-Timeout or previous revoked commitment
-/// transaction before we fail corresponding inbound HTLCs. This prevents us from failing backwards
-/// and then getting a reorg resulting in us losing money.
-pub(crate) const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;
+/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
+/// at the CLTV expiration height, but giving a grace period to our peer may be profitable for us
+/// if they can still provide a late preimage. Nevertheless, this grace period has to be accounted
+/// for in our CLTV_EXPIRY_DELTA to remain secure. Following this policy we may decrease the rate
+/// of channel failures due to expiration, at the cost of funds being locked longer in case of
+/// failure. This delay also covers a low-power peer being slow to process blocks and thus being
+/// behind us on the accurate block height.
+/// When an on-chain failure has to be passed backward, we may ourselves see the last block of
+/// ANTI_REORG_DELAY with at worst this delay, so this value is not only a mercy for our peer but
+/// also a safeguard for us, ensuring we still have enough time to fail backward.
+pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
+/// Number of blocks we wait after seeing an HTLC output resolved before we fail the corresponding
+/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us
+/// losing money. We also use this delay to be sure we can safely remove our in-flight claim txn
+/// from the bump-candidates buffer. It may cause spurious generation of bumped claim txn, but
+/// that's alright given the outpoint is already resolved by a previous claim tx. What we want to
+/// avoid is a reorg evicting our claim tx while we have stopped bumping another claim tx for it.
+pub(crate) const ANTI_REORG_DELAY: u32 = 6;
 
 #[derive(Clone, PartialEq)]
 enum Storage {
@@ -353,7 +365,7 @@ enum InputDescriptors {
 }
 
 /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
-/// once they mature to enough confirmations (HTLC_FAIL_ANTI_REORG_DELAY)
+/// once they mature to enough confirmations (ANTI_REORG_DELAY)
 #[derive(Clone, PartialEq)]
 enum OnchainEvent {
        /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
@@ -1298,8 +1310,8 @@ impl ChannelMonitor {
                                                if let Some(ref outpoints) = self.remote_claimable_outpoints.get($txid) {
                                                        for &(ref htlc, ref source_option) in outpoints.iter() {
                                                                if let &Some(ref source) = source_option {
-                                                                       log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-                                                                       match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                                                                       log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                                                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
                                                                                hash_map::Entry::Occupied(mut entry) => {
                                                                                        let e = entry.get_mut();
                                                                                        e.retain(|ref event| {
@@ -1396,7 +1408,7 @@ impl ChannelMonitor {
                                                                        }
                                                                }
                                                                log_trace!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of remote commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
-                                                               match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                                                               match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
                                                                        hash_map::Entry::Occupied(mut entry) => {
                                                                                let e = entry.get_mut();
                                                                                e.retain(|ref event| {
@@ -1788,8 +1800,8 @@ impl ChannelMonitor {
 
                macro_rules! wait_threshold_conf {
                        ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
-                               log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-                               match self.onchain_events_waiting_threshold_conf.entry($height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                               log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                               match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
                                        hash_map::Entry::Occupied(mut entry) => {
                                                let e = entry.get_mut();
                                                e.retain(|ref event| {
@@ -2022,7 +2034,7 @@ impl ChannelMonitor {
        }
 
        fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash) {
-               if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + HTLC_FAIL_ANTI_REORG_DELAY - 1)) {
+               if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
                        //We may discard:
                        //- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
                        //- our claim tx on a commitment tx output
@@ -2059,16 +2071,16 @@ impl ChannelMonitor {
                                        // from us until we've reached the point where we go on-chain with the
                                        // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
                                        // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
-                                       //  aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
+                                       //  aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
                                        //      inbound_cltv == height + CLTV_CLAIM_BUFFER
-                                       //      outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
-                                       //      HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+                                       //      outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+                                       //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
                                        //      CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
-                                       //      HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+                                       //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
                                        //  The final, above, condition is checked for statically in channelmanager
                                        //  with CHECK_CLTV_EXPIRY_SANITY_2.
                                        let htlc_outbound = $local_tx == htlc.offered;
-                                       if ( htlc_outbound && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
+                                       if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
                                           (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
                                                log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
                                                return true;
@@ -2206,8 +2218,8 @@ impl ChannelMonitor {
                                        payment_preimage.0.copy_from_slice(&input.witness[1]);
                                        htlc_updated.push((source, Some(payment_preimage), payment_hash));
                                } else {
-                                       log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-                                       match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                                       log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
+                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        let e = entry.get_mut();
                                                        e.retain(|ref event| {
index 2706627a05eb243c8b9a0662b85acff15f9ea602..084b6bc5c61a0f63301b840f95929b656d5c5e44 100644 (file)
@@ -8,7 +8,7 @@ use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor};
 use chain::keysinterface;
 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC, BREAKDOWN_TIMEOUT};
 use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash};
-use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
 use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT};
 use ln::onion_utils;
 use ln::router::{Route, RouteHop};
@@ -1695,7 +1695,7 @@ fn channel_monitor_network_test() {
        {
                let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
-               for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
+               for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
                        header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
@@ -1871,7 +1871,7 @@ fn claim_htlc_outputs_shared_tx() {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
-               connect_blocks(&nodes[1].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+               connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
@@ -1939,7 +1939,7 @@ fn claim_htlc_outputs_single_tx() {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
                nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
-               connect_blocks(&nodes[1].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
+               connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
 
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
@@ -2241,7 +2241,7 @@ fn test_htlc_on_chain_timeout() {
        }
 
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1);
-       connect_blocks(&nodes[1].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+       connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
        check_added_monitors!(nodes[1], 0);
        check_closed_broadcast!(nodes[1]);
 
@@ -2300,7 +2300,7 @@ fn test_simple_commitment_revoked_fail_backward() {
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+       connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
        check_added_monitors!(nodes[1], 0);
        check_closed_broadcast!(nodes[1]);
 
@@ -2453,7 +2453,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+       connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
@@ -3899,7 +3899,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
 
        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
-       connect_blocks(&nodes[1].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
+       connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
        expect_pending_htlcs_forwardable!(nodes[1]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
@@ -4118,7 +4118,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        } else {
                nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_prev_commitment_tx[0]], &[1; 1]);
        }
-       connect_blocks(&nodes[2].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
+       connect_blocks(&nodes[2].chain_monitor, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
        check_closed_broadcast!(nodes[2]);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 2);
@@ -4348,7 +4348,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        // to "time out" the HTLC.
 
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+       for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
                nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
                header.prev_blockhash = header.bitcoin_hash();
        }
@@ -4387,7 +4387,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        }
 
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+       for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
                nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
                header.prev_blockhash = header.bitcoin_hash();
        }
@@ -4791,7 +4791,7 @@ fn test_onion_failure() {
        }, || {}, true, Some(UPDATE|13), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
 
        run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, |msg| {
-               let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1;
+               let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[1].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
        }, ||{}, true, Some(UPDATE|14), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
@@ -4801,7 +4801,7 @@ fn test_onion_failure() {
        }, false, Some(PERM|15), None);
 
        run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, |msg| {
-               let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1;
+               let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[2].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
        }, || {}, true, Some(17), None);
@@ -5515,7 +5515,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 }
 
 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
-       // Dust-HTLC failure updates must be delayed until failure-trigger tx (in this case local commitment) reach HTLC_FAIL_ANTI_REORG_DELAY
+       // Dust-HTLC failure updates must be delayed until failure-trigger tx (in this case local commitment) reach ANTI_REORG_DELAY
        // We can have at most two valid local commitment tx, so both cases must be covered, and both txs must be checked to get them all as
        // HTLC could have been removed from lastest local commitment tx but still valid until we get remote RAA
 
@@ -5577,7 +5577,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        }
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-       connect_blocks(&nodes[0].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
+       connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
        let events = nodes[0].node.get_and_clear_pending_events();
        // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
        assert_eq!(events.len(), 2);
@@ -5655,7 +5655,7 @@ fn test_no_failure_dust_htlc_local_commitment() {
 }
 
 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
-       // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach HTLC_FAIL_ANTI_REORG_DELAY.
+       // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
        // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
        // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
        // Broadcast of timeout tx on remote commitment tx, trigger failure-udate of non-dust HTLCs
@@ -5692,7 +5692,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                }
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
-               let parent_hash  = connect_blocks(&nodes[0].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
+               let parent_hash  = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
@@ -5707,7 +5707,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
                let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               connect_blocks(&nodes[0].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
+               connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
@@ -5727,7 +5727,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                        _ => panic!("Unexpected event"),
                }
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
-               let parent_hash  = connect_blocks(&nodes[0].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
+               let parent_hash  = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
                let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                if !revoked {
                        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5743,7 +5743,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                        nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
                        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                        let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                       connect_blocks(&nodes[0].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
+                       connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 1);
                        match events[0] {
@@ -5753,7 +5753,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                                _ => panic!("Unexpected event"),
                        }
                } else {
-                       // If revoked, both dust & non-dust HTLCs should have been failed after HTLC_FAIL_ANTI_REORG_DELAY confs of revoked
+                       // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
                        // commitment tx
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 2);