/// fail this HTLC,
/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
/// condition with the above), we will fail this HTLC without telling the user we received it,
-/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
-/// that HTLC expires within this many blocks, we will simply fail the HTLC instead.
///
/// (1) is all about protecting us - we need enough time to update the channel state before we hit
/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
/// in a race condition between the user connecting a block (which would fail it) and the user
/// providing us the preimage (which would claim it).
-///
-/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
-/// end up force-closing the channel on us to claim it.
pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
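// A quick sketch of how this buffer composes, as a worked example rather than
// part of the patch. The concrete values are illustrative only (the real
// CLTV_CLAIM_BUFFER and LATENCY_GRACE_PERIOD_BLOCKS are defined alongside this
// constant), and `would_fail_back` is a hypothetical helper, not an LDK API:
//
//     const CLTV_CLAIM_BUFFER: u32 = 6; // illustrative value
//     const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // illustrative value
//     const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; // = 9
//
//     fn would_fail_back(cltv_expiry: u32, cur_height: u32) -> bool {
//         // Case (2) above: an HTLC received within HTLC_FAIL_BACK_BUFFER
//         // blocks of its expiry is failed without being surfaced to the user.
//         cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER
//     }
//
//     assert!(would_fail_back(700_008, 700_000));  // expiry within 9 blocks: fail it
//     assert!(!would_fail_back(700_010, 700_000)); // comfortably far out: accept it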
// TODO(devrandom) replace this with HolderCommitmentTransaction
use ln::chan_utils;
use chain::BestBlock;
use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::{Sign, KeysInterface};
use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
-> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger {
let mut timed_out_htlcs = Vec::new();
- let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
+ // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
+ // forward an HTLC when our counterparty should almost certainly just fail it for expiring
+ // ~now.
+ let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
self.holding_cell_htlc_updates.retain(|htlc_update| {
match htlc_update {
&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
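// A minimal sketch of the holding-cell timeout path above, assuming the elided
// match-arm body: any queued AddHTLC whose cltv_expiry falls at or below
// unforwarded_htlc_cltv_limit is dropped from the holding cell and pushed into
// timed_out_htlcs so the caller can fail it backwards, while every other queued
// update is retained. Roughly:
//
//     &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
//         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
//             timed_out_htlcs.push((source.clone(), payment_hash.clone()));
//             false // drop the HTLC from the holding cell
//         } else { true }
//     },
//     _ => true, // non-AddHTLC holding-cell updates are unaffected by expiry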
let cur_height = self.best_block.read().unwrap().height() + 1;
- // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
- // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+ // Theoretically, channel counterparty shouldn't send us an HTLC expiring now,
+ // but we want to be robust wrt counterparty packet sanitization (see
+ // HTLC_FAIL_BACK_BUFFER rationale).
if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
break Some(("CLTV expiry is too far in the future", 21, None));
}
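// Worked example of the two bounds above, with illustrative numbers: if
// cur_height = 700_000, HTLC_FAIL_BACK_BUFFER = 9, and CLTV_FAR_FAR_AWAY = 2016
// (roughly two weeks of blocks), an incoming HTLC is rejected as
// expiry_too_soon when msg.cltv_expiry <= 700_009 and as expiry_too_far when
// msg.cltv_expiry > 702_016; anything in between passes both checks.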
- // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
- // But, to be safe against policy reception, we use a longer delay.
- if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+ // If the HTLC expires ~now, don't bother trying to forward it to our
+ // counterparty. They should fail it anyway, and we'd rather not spend the
+ // round-trips or risk them deciding they definitely want the HTLC and
+ // force-closing to ensure they get it if we're offline.
+ // We previously had a much more aggressive check here which tried to ensure
+ // our counterparty receives an HTLC with *our* risk threshold met on it,
+ // but there is no need for that: since we're a bit conservative with our
+ // risk threshold, it just results in failing to forward payments.
+ if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
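// Illustrative numbers for the relaxed forwarding bound above: with
// cur_height = 700_000 and LATENCY_GRACE_PERIOD_BLOCKS = 3 (value illustrative),
// an HTLC with outgoing_cltv_value = 700_003 is failed with "Outgoing CLTV
// value is too soon", while 700_004 is forwarded. Under the old
// HTLC_FAIL_BACK_BUFFER bound the cutoff would have sat several blocks higher,
// failing payments our counterparty could still comfortably handle.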
check_added_monitors!(nodes[1], 0);
}
- connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
connect_blocks(&nodes[1], 1);
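// After this block connects, the pending HTLC sits within
// LATENCY_GRACE_PERIOD_BLOCKS of its expiry, so best_block_updated's
// unforwarded_htlc_cltv_limit check should time it out of the holding cell and
// fail it backwards; the quiet assertions above confirm nothing fired one block
// earlier. (Under the old bound, the fail-back would have happened
// CLTV_CLAIM_BUFFER blocks sooner.)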