From 6bfab9d30a4ddab27f785643f2d974ffdc8371c0 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 31 Jul 2021 03:34:16 +0000 Subject: [PATCH] Correctly detect missing HTLCs when a local commitment tx was broadcast If we forward an HTLC to our counterparty, but we force-closed the channel before our counterparty provides us an updated commitment transaction, we'll end up with a commitment transaction that does not contain the HTLC which we attempted to forward. In this case, we need to wait `ANTI_REORG_DELAY` blocks and then fail back the HTLC as there is no way for us to learn the preimage and the confirmed commitment transaction paid us the value of the HTLC. However, check_spend_holder_transaction did not do this - it instead only looked for dust HTLCs in the confirmed commitment transaction, paying no attention to what other HTLCs may exist that are missed. This will eventually lead to channel force-closure as the channel on which we received the inbound HTLC to forward will be closed in time for the initial sender to claim the HTLC on-chain. --- lightning/src/chain/channelmonitor.rs | 52 ++++++----------- lightning/src/ln/mod.rs | 3 + lightning/src/ln/monitor_tests.rs | 81 +++++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 35 deletions(-) create mode 100644 lightning/src/ln/monitor_tests.rs diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 23696e7b..b5f6bce2 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -510,6 +510,9 @@ pub(crate) struct ChannelMonitorImpl { on_holder_tx_csv: u16, commitment_secrets: CounterpartyCommitmentSecrets, + /// The set of outpoints in each counterparty commitment transaction. We always need at least + /// the payment hash from `HTLCOutputInCommitment` to claim even a revoked commitment + /// transaction broadcast as we need to be able to construct the witness script in all cases. 
counterparty_claimable_outpoints: HashMap>)>>, /// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain. /// Nor can we figure out their commitment numbers without the commitment transaction they are @@ -1204,6 +1207,18 @@ impl ChannelMonitor { /// Compares a broadcasted commitment transaction's HTLCs with those in the latest state, /// failing any HTLCs which didn't make it into the broadcasted commitment transaction back /// after ANTI_REORG_DELAY blocks. +/// +/// We always compare against the set of HTLCs in counterparty commitment transactions, as those +/// are the commitment transactions which are generated by us. The off-chain state machine in +/// `Channel` will automatically resolve any HTLCs which were never included in a commitment +/// transaction when it detects channel closure, but it is up to us to ensure any HTLCs which were +/// included in a remote commitment transaction are failed back if they are not present in the +/// broadcasted commitment transaction. +/// +/// Specifically, the removal process for HTLCs in `Channel` is always based on the counterparty +/// sending a `revoke_and_ack`, which causes us to clear `prev_counterparty_commitment_txid`. Thus, +/// as long as we examine both the current counterparty commitment transaction and, if it hasn't +/// been revoked yet, the previous one, we will never "forget" to resolve an HTLC. macro_rules! fail_unbroadcast_htlcs { ($self: expr, $commitment_tx_type: expr, $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { { macro_rules! 
check_htlc_fails { @@ -1780,6 +1795,7 @@ impl ChannelMonitorImpl { let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height); let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx); append_onchain_update!(res, to_watch); + fail_unbroadcast_htlcs!(self, "latest holder", height, self.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, c)| (a, c.as_ref())), logger); } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx { if holder_tx.txid == commitment_txid { is_holder_tx = true; @@ -1787,45 +1803,11 @@ impl ChannelMonitorImpl { let res = self.get_broadcasted_holder_claims(holder_tx, height); let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx); append_onchain_update!(res, to_watch); - } - } - - macro_rules! fail_dust_htlcs_after_threshold_conf { - ($holder_tx: expr, $commitment_tx: expr) => { - for &(ref htlc, _, ref source) in &$holder_tx.htlc_outputs { - if htlc.transaction_output_index.is_none() { - if let &Some(ref source) = source { - self.onchain_events_awaiting_threshold_conf.retain(|ref entry| { - if entry.height != height { return true; } - match entry.event { - OnchainEvent::HTLCUpdate { source: ref update_source, .. 
} => { - update_source != source - }, - _ => true, - } - }); - let entry = OnchainEventEntry { - txid: commitment_txid, - height, - event: OnchainEvent::HTLCUpdate { - source: source.clone(), payment_hash: htlc.payment_hash, - onchain_value_satoshis: Some(htlc.amount_msat / 1000) - }, - }; - log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})", - log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold()); - self.onchain_events_awaiting_threshold_conf.push(entry); - } - } - } + fail_unbroadcast_htlcs!(self, "previous holder", height, holder_tx.htlc_outputs.iter().map(|(a, _, c)| (a, c.as_ref())), logger); } } if is_holder_tx { - fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx, "latest"); - if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx { - fail_dust_htlcs_after_threshold_conf!(holder_tx, "previous"); - } } (claim_requests, (commitment_txid, watch_outputs)) diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 7500b93c..f1ba914c 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -52,6 +52,9 @@ mod reorg_tests; #[cfg(test)] #[allow(unused_mut)] mod onion_route_tests; +#[cfg(test)] +#[allow(unused_mut)] +mod monitor_tests; pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs new file mode 100644 index 00000000..8fdf28e5 --- /dev/null +++ b/lightning/src/ln/monitor_tests.rs @@ -0,0 +1,81 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE +// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Further functional tests which test blockchain reorganizations. 
+ +use chain::channelmonitor::ANTI_REORG_DELAY; +use ln::{PaymentPreimage, PaymentHash}; +use ln::features::InitFeatures; +use ln::msgs::{ChannelMessageHandler, HTLCFailChannelUpdate, ErrorAction}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use routing::router::get_route; + +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; + +use prelude::*; + +use ln::functional_test_utils::*; + +#[test] +fn chanmon_fail_from_stale_commitment() { + // If we forward an HTLC to our counterparty, but we force-closed the channel before our + // counterparty provides us an updated commitment transaction, we'll end up with a commitment + // transaction that does not contain the HTLC which we attempted to forward. In this case, we + // need to wait `ANTI_REORG_DELAY` blocks and then fail back the HTLC as there is no way for us + // to learn the preimage and the confirmed commitment transaction paid us the value of the + // HTLC. + // + // However, previously, we did not do this, ignoring the HTLC entirely. + // + // This could lead to channel closure if the sender we received the HTLC from decides to go on + // chain to get their HTLC back before it times out. + // + // Here, we check exactly this case, forwarding a payment from A, through B, to C, before B + // broadcasts its latest commitment transaction, which should result in it eventually failing + // the HTLC back off-chain to A. 
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let (update_a, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2); + + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); + + // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment + // transaction for nodes[1]. 
+ mine_transaction(&nodes[1], &bs_txn[0]); + check_added_monitors!(nodes[1], 1); + check_closed_broadcast!(nodes[1], true); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, true, true); + expect_payment_failed!(nodes[0], payment_hash, false); + expect_payment_failure_chan_update!(nodes[0], update_a.contents.short_channel_id, true); +} -- 2.30.2