From c9483c69081dc6818ed57e2ca3212010bb132dc7 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 9 Jan 2020 14:09:25 -0500
Subject: [PATCH] Time out incoming HTLCs when we reach cltv_expiry (+ test)

We only do this for incoming HTLCs directly as we rely on channel
closure and HTLC-Timeout broadcast to fail any HTLCs which we relayed
onwards where our next-hop doesn't update_fail in time.
---
 lightning/src/ln/channelmanager.rs        | 47 +++++++++++++++---
 lightning/src/ln/functional_test_utils.rs |  8 ++-
 lightning/src/ln/functional_tests.rs      | 60 +++++++++++++++++++++++
 lightning/src/util/events.rs              |  3 ++
 4 files changed, 110 insertions(+), 8 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 0acc59df6..b61656707 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -28,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER};
 use ln::features::{InitFeatures, NodeFeatures};
 use ln::router::{Route, RouteHop};
 use ln::msgs;
@@ -76,6 +76,7 @@ enum PendingHTLCRouting {
 	},
 	Receive {
 		payment_data: Option<msgs::FinalOnionHopData>,
+		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
 	},
 }
 
@@ -129,6 +130,7 @@ struct ClaimableHTLC {
 	/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
 	/// are part of the same payment.
 	payment_data: Option<msgs::FinalOnionHopData>,
+	cltv_expiry: u32,
 }
 
 /// Tracks the inbound corresponding to an outbound HTLC
@@ -296,8 +298,6 @@ pub(super) struct ChannelHolder
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the channels given here actually existing anymore by the time you
 	/// go to read them!
-	/// TODO: We need to time out HTLCs sitting here which are waiting on other AMP HTLCs to
-	/// arrive.
 	claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
 	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
@@ -1063,7 +1063,10 @@ impl ChannelMan
 				// delay) once they've send us a commitment_signed!
 
 				PendingHTLCStatus::Forward(PendingHTLCInfo {
-					routing: PendingHTLCRouting::Receive { payment_data },
+					routing: PendingHTLCRouting::Receive {
+						payment_data,
+						incoming_cltv_expiry: msg.cltv_expiry,
+					},
 					payment_hash: msg.payment_hash.clone(),
 					incoming_shared_secret: shared_secret,
 					amt_to_forward: next_hop_data.amt_to_forward,
@@ -1686,7 +1689,7 @@ impl ChannelMan
 				for forward_info in pending_forwards.drain(..) {
 					match forward_info {
 						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
-								routing: PendingHTLCRouting::Receive { payment_data },
+								routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
 								incoming_shared_secret, payment_hash, amt_to_forward, .. }, } => {
 							let prev_hop = HTLCPreviousHopData {
 								short_channel_id: prev_short_channel_id,
@@ -1703,6 +1706,7 @@ impl ChannelMan
 								prev_hop,
 								value: amt_to_forward,
 								payment_data: payment_data.clone(),
+								cltv_expiry: incoming_cltv_expiry,
 							});
 							if let &Some(ref data) = &payment_data {
 								for htlc in htlcs.iter() {
@@ -2958,6 +2962,7 @@ impl
 		let mut failed_channels = Vec::new();
+		let mut timed_out_htlcs = Vec::new();
+			channel_state.claimable_htlcs.retain(|&(ref payment_hash, _), htlcs| {
+				htlcs.retain(|htlc| {
+					// If height is approaching the number of blocks we think it takes us to get
+					// our commitment transaction confirmed before the HTLC expires, plus the
+					// number of blocks we generally consider it to take to do a commitment update,
+					// just give up on it and fail the HTLC.
+					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+						let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+						htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+							failure_code: 0x4000 | 15,
+							data: htlc_msat_height_data
+						}));
+						false
+					} else { true }
+				});
+				!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+			});
 		}
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
+
+		for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
+			// Call it incorrect_or_unknown_payment_details as the issue, ultimately, is that the
+			// user failed to provide us a preimage within the cltv_expiry time window.
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+		}
 		self.latest_block_height.store(height as usize, Ordering::Release);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
 		loop {
@@ -3320,9 +3350,10 @@ impl Writeable for PendingHTLCInfo {
 				onion_packet.write(writer)?;
 				short_channel_id.write(writer)?;
 			},
-			&PendingHTLCRouting::Receive { ref payment_data } => {
+			&PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
 				1u8.write(writer)?;
 				payment_data.write(writer)?;
+				incoming_cltv_expiry.write(writer)?;
 			},
 		}
 		self.incoming_shared_secret.write(writer)?;
@@ -3343,6 +3374,7 @@ impl Readable for PendingHTLCInfo {
 				},
 				1u8 => PendingHTLCRouting::Receive {
 					payment_data: Readable::read(reader)?,
+					incoming_cltv_expiry: Readable::read(reader)?,
 				},
 				_ => return Err(DecodeError::InvalidValue),
 			},
@@ -3415,7 +3447,8 @@ impl_writeable!(HTLCPreviousHopData, 0, {
 impl_writeable!(ClaimableHTLC, 0, {
 	prev_hop,
 	value,
-	payment_data
+	payment_data,
+	cltv_expiry
 });
 
 impl Writeable for HTLCSource {
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 55f734a32..5fd420793 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -717,7 +717,7 @@ macro_rules! get_payment_preimage_hash {
 	}
 }
 
-macro_rules! expect_pending_htlcs_forwardable {
+macro_rules! expect_pending_htlcs_forwardable_ignore {
 	($node: expr) => {{
 		let events = $node.node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 1);
@@ -725,6 +725,12 @@ macro_rules! expect_pending_htlcs_forwardable {
 			Event::PendingHTLCsForwardable { .. } => { },
 			_ => panic!("Unexpected event"),
 		};
+	}}
+}
+
+macro_rules! expect_pending_htlcs_forwardable {
+	($node: expr) => {{
+		expect_pending_htlcs_forwardable_ignore!($node);
 		$node.node.process_pending_htlc_forwards();
 	}}
 }
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 9d8a0bc24..31c96950a 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2320,6 +2320,8 @@ fn claim_htlc_outputs_single_tx() {
 	check_added_monitors!(nodes[0], 1);
 	nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
 	check_added_monitors!(nodes[1], 1);
+	expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+
 	connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
 
 	let events = nodes[1].node.get_and_clear_pending_events();
@@ -3653,6 +3655,60 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
+#[test]
+fn test_htlc_timeout() {
+	// If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
+	// to avoid our counterparty failing the channel.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+	let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 100000);
+
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	nodes[0].block_notifier.block_connected_checked(&header, 101, &[], &[]);
+	nodes[1].block_notifier.block_connected_checked(&header, 101, &[], &[]);
+	for i in 102..TEST_FINAL_CLTV + 100 + 1 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
+		header.prev_blockhash = header.bitcoin_hash();
+		nodes[0].block_notifier.block_connected_checked(&header, i, &[], &[]);
+		nodes[1].block_notifier.block_connected_checked(&header, i, &[], &[]);
+	}
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	check_added_monitors!(nodes[1], 1);
+	let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
+	assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
+	assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
+	assert!(htlc_timeout_updates.update_fee.is_none());
+
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
+	let events = nodes[0].node.get_and_clear_pending_events();
+	match &events[0] {
+		&Event::PaymentFailed { payment_hash, rejected_by_dest, error_code, ref error_data } => {
+			assert_eq!(payment_hash, our_payment_hash);
+			assert!(rejected_by_dest);
+			assert_eq!(error_code.unwrap(), 0x4000 | 15);
+			// 100_000 msat as u64, followed by a height of 123 as u32
+			assert_eq!(&error_data.as_ref().unwrap()[..], &[
+				((100_000u64 >> 7*8) & 0xff) as u8,
+				((100_000u64 >> 6*8) & 0xff) as u8,
+				((100_000u64 >> 5*8) & 0xff) as u8,
+				((100_000u64 >> 4*8) & 0xff) as u8,
+				((100_000u64 >> 3*8) & 0xff) as u8,
+				((100_000u64 >> 2*8) & 0xff) as u8,
+				((100_000u64 >> 1*8) & 0xff) as u8,
+				((100_000u64 >> 0*8) & 0xff) as u8,
+				0, 0, 0, 123]);
+		},
+		_ => panic!("Unexpected event"),
+	}
+}
+
 #[test]
 fn test_invalid_channel_announcement() {
 	//Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs
@@ -7140,6 +7196,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
 	// Broadcast set of revoked txn on A
 	let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, true, header.bitcoin_hash());
+	expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+
 	let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 	nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }, 129);
 	let first;
@@ -7472,6 +7530,8 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
 	// Broadcast set of revoked txn on A
 	let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, false, Default::default());
+	expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+
 	let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 	nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129);
 	check_closed_broadcast!(nodes[0], false);
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index 8f3460567..ca6355af0 100644
--- a/lightning/src/util/events.rs
+++ b/lightning/src/util/events.rs
@@ -55,6 +55,9 @@ pub enum Event {
 	/// ChannelManager::fail_htlc_backwards to free up resources for this HTLC.
 	/// The amount paid should be considered 'incorrect' when it is less than or more than twice
 	/// the amount expected.
+	/// If you fail to call either ChannelManager::claim_funds or
+	/// ChannelManager::fail_htlc_backwards within the HTLC's timeout, the HTLC will be
+	/// automatically failed.
 	PaymentReceived {
 		/// The hash for which the preimage should be handed to the ChannelManager.
 		payment_hash: PaymentHash,
-- 
2.39.5
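
A quick illustration of the rule the patch's new block_connected code applies, for reviewers: this is a minimal, standalone sketch and not part of the patch. The constant values, the ClaimableHtlc struct, and the should_fail_back/failure_data helpers are assumptions made up for the example; in the patch the buffer comes from ln::channelmonitor::HTLC_FAIL_BACK_BUFFER and the encoding uses byte_utils::be64_to_array/be32_to_array. The idea: once the chain tip reaches cltv_expiry - HTLC_FAIL_BACK_BUFFER, an unclaimed inbound HTLC is failed backwards with error 0x4000 | 15 and failure data of the amount (u64, big-endian) followed by the failing height (u32, big-endian).

// Standalone sketch, not rust-lightning code. Constant values are assumptions for illustration.
const CLTV_CLAIM_BUFFER: u32 = 6;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
// Mirrors HTLC_FAIL_BACK_BUFFER = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS.
const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

// Hypothetical stand-in for a claimable-HTLC entry as tracked by the patch.
struct ClaimableHtlc {
    value_msat: u64,
    cltv_expiry: u32,
}

// True once we should stop waiting for the user to claim/fail and fail the HTLC backwards.
fn should_fail_back(htlc: &ClaimableHtlc, height: u32) -> bool {
    height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER
}

// Failure data paired with 0x4000 | 15: amount (u64 BE) then height (u32 BE),
// i.e. the 12-byte layout asserted in test_htlc_timeout.
fn failure_data(htlc: &ClaimableHtlc, height: u32) -> Vec<u8> {
    let mut data = htlc.value_msat.to_be_bytes().to_vec();
    data.extend_from_slice(&height.to_be_bytes());
    data
}

fn main() {
    let htlc = ClaimableHtlc { value_msat: 100_000, cltv_expiry: 132 };
    assert!(!should_fail_back(&htlc, 122)); // still inside the user's claim window
    assert!(should_fail_back(&htlc, 123));  // buffer reached: fail it back now
    assert_eq!(failure_data(&htlc, 123).len(), 12);
}

Under these assumed values the threshold lands at height 123 for a 100_000 msat HTLC, which is the same amount/height pair the new test checks in its error_data assertion; the relay case is deliberately left to channel closure and HTLC-Timeout broadcast, as the commit message notes.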