Merge pull request #1119 from TheBlueMatt/2021-10-less-aggressive-htlc-timeouts
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 16 Nov 2021 16:18:20 +0000 (16:18 +0000)
committer GitHub <noreply@github.com>
Tue, 16 Nov 2021 16:18:20 +0000 (16:18 +0000)
Be less aggressive in outbound HTLC CLTV timeout checks

lightning/src/chain/channelmonitor.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs

index 8be785f29d2c38c3165023816e8821b1853418d1..425a1890a8f46acfabd68773dbc32f51172e2676 100644 (file)
@@ -225,8 +225,6 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 ///    fail this HTLC,
 /// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
 ///    condition with the above), we will fail this HTLC without telling the user we received it,
-/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
-///    that HTLC expires within this many blocks, we will simply fail the HTLC instead.
 ///
 /// (1) is all about protecting us - we need enough time to update the channel state before we hit
 /// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
@@ -234,9 +232,6 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 /// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
 /// in a race condition between the user connecting a block (which would fail it) and the user
 /// providing us the preimage (which would claim it).
-///
-/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
-/// end up force-closing the channel on us to claim it.
 pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
 
 // TODO(devrandom) replace this with HolderCommitmentTransaction
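
For context, a minimal sketch of how the two remaining cases of HTLC_FAIL_BACK_BUFFER documented above might be applied. The constant values and helper names below are illustrative stand-ins, not the crate's actual definitions:

// Sketch only: assumed constant values and hypothetical helpers, one per
// remaining case of the buffer described in the doc comment above.
const CLTV_CLAIM_BUFFER: u32 = 18;          // assumed value, for illustration
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value, for illustration
const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

/// Case (1): an HTLC we already accepted must be failed back upstream once its
/// expiry is within the buffer, so we never get near CLTV_CLAIM_BUFFER and an
/// on-chain claim.
fn must_fail_back(cltv_expiry: u32, best_block_height: u32) -> bool {
    cltv_expiry <= best_block_height + HTLC_FAIL_BACK_BUFFER
}

/// Case (2): an HTLC arriving this close to expiry (plus one block to dodge the
/// race with block connection) is failed without ever surfacing it to the user.
fn reject_on_receipt(cltv_expiry: u32, best_block_height: u32) -> bool {
    cltv_expiry <= best_block_height + HTLC_FAIL_BACK_BUFFER + 1
}

fn main() {
    // With an expiry 30 blocks out, neither case fires at height 100.
    assert!(!must_fail_back(130, 100));
    assert!(!reject_on_receipt(130, 100));
    // Once the expiry is within the buffer, the fail-back case does.
    assert!(must_fail_back(120, 100));
}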
index e39478bd740c063ce1f29a7dc853238ec75fcf48..2976882c8dd5888ec0e1f0fdc6b74ef5d008e892 100644 (file)
@@ -32,7 +32,7 @@ use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputIn
 use ln::chan_utils;
 use chain::BestBlock;
 use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::{OutPoint, TransactionData};
 use chain::keysinterface::{Sign, KeysInterface};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
@@ -4197,7 +4197,10 @@ impl<Signer: Sign> Channel<Signer> {
        pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
                        -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger {
                let mut timed_out_htlcs = Vec::new();
-               let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
+               // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
+               // forward an HTLC when our counterparty should almost certainly just fail it for expiring
+               // ~now.
+               let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
                self.holding_cell_htlc_updates.retain(|htlc_update| {
                        match htlc_update {
                                &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
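
To illustrate the relaxed holding-cell sweep above, a minimal sketch under assumptions: the struct, helper name, and constant value are hypothetical stand-ins for Channel's real fields, with the comparison mirroring the retain shown in this hunk.

const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value, for illustration

struct HoldingCellHtlc {
    payment_hash: [u8; 32],
    cltv_expiry: u32,
}

/// Drop (and report) any queued outbound HTLC that would expire within the
/// grace period at the new best-block height; keep the rest queued for when
/// the peer reconnects or the channel state settles.
fn sweep_holding_cell(holding_cell: &mut Vec<HoldingCellHtlc>, height: u32) -> Vec<[u8; 32]> {
    let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
    let mut timed_out = Vec::new();
    holding_cell.retain(|htlc| {
        if htlc.cltv_expiry <= unforwarded_htlc_cltv_limit {
            timed_out.push(htlc.payment_hash);
            false // fail it back instead of ever forwarding it
        } else {
            true
        }
    });
    timed_out
}

fn main() {
    let mut cell = vec![
        HoldingCellHtlc { payment_hash: [1; 32], cltv_expiry: 105 },
        HoldingCellHtlc { payment_hash: [2; 32], cltv_expiry: 150 },
    ];
    // At height 100, only the HTLC expiring at 105 (within 6 blocks) is failed.
    let failed = sweep_holding_cell(&mut cell, 100);
    assert_eq!(failed.len(), 1);
    assert_eq!(cell.len(), 1);
}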
index 5aebf3dbf5c334f19bcc8c4de00d5db10b7621d3..631edfe6cf375bd1f419d7b949b9cd728a100621 100644 (file)
@@ -1959,17 +1959,24 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
-                                       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
-                                       // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+                                       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+                                       // but we want to be robust wrt counterparty packet sanitization (see
+                                       // HTLC_FAIL_BACK_BUFFER rationale).
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
                                                break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longer delay.
-                                       if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+                                       // If the HTLC expires ~now, don't bother trying to forward it to our
+                                       // counterparty. They should fail it anyway, but we don't want to bother with
+                                       // the round-trips or risk them deciding they definitely want the HTLC and
+                                       // force-closing to ensure they get it if we're offline.
+                                       // We previously had a much more aggressive check here which tried to ensure
+                                       // our counterparty receives an HTLC which meets *our* risk threshold, but
+                                       // there is no need to do that, and since we're a bit conservative with our
+                                       // risk threshold it just results in failing to forward payments.
+                                       if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
                                                break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
 
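
The shape of the three relay-time checks in this hunk, as a hedged sketch: the constant values are illustrative (CLTV_FAR_FAR_AWAY in particular is an assumed number), and the plain error strings stand in for the real BOLT 4 failure code plus channel_update.

const CLTV_CLAIM_BUFFER: u32 = 18;            // assumed value, for illustration
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6;   // assumed value, for illustration
const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;   // assumed: roughly two weeks of blocks

fn check_relay_cltv(
    inbound_cltv_expiry: u32,
    outgoing_cltv_value: u32,
    best_block_height: u32,
) -> Result<(), &'static str> {
    let cur_height = best_block_height + 1;
    // Inbound side keeps the full fail-back buffer: this protects *us* from
    // having to go on-chain, so it is not relaxed by this change.
    if inbound_cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER {
        return Err("CLTV expiry is too close"); // expiry_too_soon
    }
    if inbound_cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY {
        return Err("CLTV expiry is too far in the future"); // expiry_too_far
    }
    // Outbound side now only needs the latency grace period: if the HTLC
    // expires ~now, our counterparty will simply fail it, so skip the forward.
    if outgoing_cltv_value as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
        return Err("Outgoing CLTV value is too soon");
    }
    Ok(())
}

fn main() {
    // At height 100: an outgoing CLTV of 110 was rejected under the old
    // HTLC_FAIL_BACK_BUFFER policy but is forwardable under the new one.
    assert!(check_relay_cltv(200, 110, 100).is_ok());
    assert!(check_relay_cltv(200, 105, 100).is_err());
}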
index 5e87aaff3c40a93c67ddcf012a67098a33d427b7..118bb1497d8f20a833a2c7273e56a25c81b4dea7 100644 (file)
@@ -3994,7 +3994,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
        }
        check_added_monitors!(nodes[1], 0);
 
-       connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
+       connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        connect_blocks(&nodes[1], 1);
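
The boundary the adjusted test now exercises, sketched with illustrative numbers (the real test's heights and TEST_FINAL_CLTV differ; this only shows why a single extra connected block flips the check):

const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value, for illustration

fn holding_cell_htlc_times_out(cltv_expiry: u32, height: u32) -> bool {
    cltv_expiry <= height + LATENCY_GRACE_PERIOD_BLOCKS
}

fn main() {
    let cltv_expiry = 107;
    // One block short of the limit: no timeout yet, matching the empty
    // msg_events/events assertions after the first connect_blocks call.
    assert!(!holding_cell_htlc_times_out(cltv_expiry, 100));
    // One more block and the holding-cell HTLC is failed back, matching the
    // final connect_blocks(&nodes[1], 1).
    assert!(holding_cell_htlc_times_out(cltv_expiry, 101));
}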