From: Matt Corallo
Date: Fri, 4 Jan 2019 19:38:05 +0000 (-0500)
Subject: Check for timing-out HTLCs in remote unrevoked commitments
X-Git-Tag: v0.0.12~245^2~1
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=d8707c2b205c1fd387dc2a393b46205ae4b73113;p=rust-lightning

Check for timing-out HTLCs in remote unrevoked commitments

This resolves a TODO/issue in would_broadcast_at_height where we would
not fail a channel with HTLCs which time out in remote broadcastable
transactions.
---
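The check added below reduces to two expiry comparisons per HTLC, applied to
every unrevoked commitment transaction. A minimal standalone sketch of that
rule follows; the constant values and the Htlc struct here are simplified
stand-ins for illustration, not the actual rust-lightning definitions:

    // Sketch of the decision rule in would_broadcast_at_height. Constants
    // and types are assumed stand-ins, not the real crate definitions.
    const CLTV_CLAIM_BUFFER: u32 = 6; // assumed: room to confirm a claim
    const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3; // assumed: grace past expiry

    struct Htlc {
        cltv_expiry: u32,
        offered: bool,       // offered by whoever holds the scanned commitment tx
        have_preimage: bool, // stands in for payment_preimages.contains_key(..)
    }

    // `local_tx` is true when scanning our own commitment transaction, where
    // an `offered` HTLC is outbound from our perspective; in a remote
    // commitment the polarity flips, hence `local_tx == offered`.
    fn must_broadcast(htlcs: &[Htlc], local_tx: bool, height: u32) -> bool {
        htlcs.iter().any(|htlc| {
            let outbound = local_tx == htlc.offered;
            (outbound && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height)
                || (!outbound
                    && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER
                    && htlc.have_preimage)
        })
    }

    fn main() {
        // An inbound HTLC we know the preimage for, expiring 5 blocks from
        // now: with a 6-block claim buffer we must go on-chain already.
        let htlcs = [Htlc { cltv_expiry: 105, offered: false, have_preimage: true }];
        assert!(must_broadcast(&htlcs, true, 100));
    }

The local_tx == offered comparison is what lets a single rule scan both our
own commitment transactions and the remote ones, where HTLC direction is
inverted.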
diff --git a/src/ln/channelmonitor.rs b/src/ln/channelmonitor.rs
index 546e4a684..c71800e59 100644
--- a/src/ln/channelmonitor.rs
+++ b/src/ln/channelmonitor.rs
@@ -1568,6 +1568,7 @@ impl ChannelMonitor {
 			if let Some(transaction_output_index) = htlc.transaction_output_index {
 				if let &Some((ref their_sig, ref our_sig)) = sigs {
 					if htlc.offered {
+						log_trace!(self, "Broadcasting HTLC-Timeout transaction against local commitment transactions");
 						let mut htlc_timeout_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
 
 						htlc_timeout_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
@@ -1584,6 +1585,7 @@ impl ChannelMonitor {
 						res.push(htlc_timeout_tx);
 					} else {
 						if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) {
+							log_trace!(self, "Broadcasting HTLC-Success transaction against local commitment transactions");
 							let mut htlc_success_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
 
 							htlc_success_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
@@ -1618,6 +1620,7 @@ impl ChannelMonitor {
 		// weren't yet included in our commitment transaction(s).
 		if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
 			if local_tx.txid == commitment_txid {
+				log_trace!(self, "Got latest local commitment tx broadcast, searching for available HTLCs to claim");
 				match self.key_storage {
 					Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
 						let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key));
@@ -1632,6 +1635,7 @@ impl ChannelMonitor {
 		}
 		if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
 			if local_tx.txid == commitment_txid {
+				log_trace!(self, "Got previous local commitment tx broadcast, searching for available HTLCs to claim");
 				match self.key_storage {
 					Storage::Local { ref delayed_payment_base_key, ref prev_latest_per_commitment_point, .. } => {
 						let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key));
@@ -1787,44 +1791,66 @@ impl ChannelMonitor {
 	}
 
 	pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool {
-		// TODO: We need to consider HTLCs which weren't included in latest local commitment
-		// transaction (or in any of the latest two local commitment transactions). This probably
-		// needs to use the same logic as the revoked-tx-announe logic - checking the last two
-		// remote commitment transactions. This probably has implications for what data we need to
-		// store in local commitment transactions.
+		// We need to consider all HTLCs which are:
+		//  * in any unrevoked remote commitment transaction, as they could broadcast said
+		//    transactions and we'd end up in a race, or
+		//  * in our latest local commitment transaction, as this is the thing we will
+		//    broadcast if we go on-chain.
 		// Note that we consider HTLCs which were below dust threshold here - while they don't
 		// strictly imply that we need to fail the channel, we need to go ahead and fail them back
 		// to the source, and if we don't fail the channel we will have to ensure that the next
 		// updates that peer sends us are update_fails, failing the channel if not. It's probably
 		// easier to just fail the channel as this case should be rare enough anyway.
+		macro_rules! scan_commitment {
+			($htlcs: expr, $local_tx: expr) => {
+				for ref htlc in $htlcs {
+					// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+					// chain with enough room to claim the HTLC without our counterparty being able to
+					// time out the HTLC first.
+					// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+					// concern is being able to claim the corresponding inbound HTLC (on another
+					// channel) before it expires. In fact, we don't even really care if our
+					// counterparty here claims such an outbound HTLC after it expired as long as we
+					// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+					// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
+					// we give ourselves a few blocks of headroom after expiration before going
+					// on-chain for an expired HTLC.
+					// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+					// from us until we've reached the point where we go on-chain with the
+					// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+					// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+					// aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
+					//     inbound_cltv == height + CLTV_CLAIM_BUFFER
+					//     outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+					//     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+					//     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+					let htlc_outbound = $local_tx == htlc.offered;
+					if ( htlc_outbound && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
+					   (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+						log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound" }, htlc.cltv_expiry);
+						return true;
+					}
+				}
+			}
+		}
+
 		if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
-			for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() {
-				// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
-				// chain with enough room to claim the HTLC without our counterparty being able to
-				// time out the HTLC first.
-				// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
-				// concern is being able to claim the corresponding inbound HTLC (on another
-				// channel) before it expires. In fact, we don't even really care if our
-				// counterparty here claims such an outbound HTLC after it expired as long as we
-				// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
-				// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
-				// we give ourselves a few blocks of headroom after expiration before going
-				// on-chain for an expired HTLC.
-				// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
-				// from us until we've reached the point where we go on-chain with the
-				// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
-				// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
-				// aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
-				//     inbound_cltv == height + CLTV_CLAIM_BUFFER
-				//     outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFER <= inbound_cltv - CLTV_CLAIM_BUFFER
-				//     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFER <= inbound_cltv - outbound_cltv
-				//     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFER <= CLTV_EXPIRY_DELTA
-				if ( htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
-						(!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
-					return true;
+			scan_commitment!(cur_local_tx.htlc_outputs.iter().map(|&(ref a, _, _)| a), true);
+		}
+
+		if let Storage::Local { ref current_remote_commitment_txid, ref prev_remote_commitment_txid, .. } = self.key_storage {
+			if let &Some(ref txid) = current_remote_commitment_txid {
+				if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
+					scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+				}
+			}
+			if let &Some(ref txid) = prev_remote_commitment_txid {
+				if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
+					scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
 				}
 			}
 		}
+
 		false
 	}
diff --git a/src/ln/functional_tests.rs b/src/ln/functional_tests.rs
index ca394fcdf..de2b42d3f 100644
--- a/src/ln/functional_tests.rs
+++ b/src/ln/functional_tests.rs
@@ -6084,10 +6084,107 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 	check_closed_broadcast!(nodes[1]);
 }
 
+fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
+	let mut nodes = create_network(2);
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV).unwrap();
+	let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, payment_hash).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	// As far as A is concerned, the HTLC is now present only in the latest remote commitment
+	// transaction; however, it is not in A's latest local commitment, so we can just broadcast that
+	// to "time out" the HTLC.
+
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+		nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+		header.prev_blockhash = header.bitcoin_hash();
+	}
+	test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
+	check_closed_broadcast!(nodes[0]);
+}
+
+fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
+	let nodes = create_network(3);
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	// Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
+	// in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
+	// Also optionally test that we *don't* fail the channel in case the commitment transaction was
+	// actually revoked.
+	let htlc_value = if use_dust { 50000 } else { 3000000 };
+	let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
+	assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash, htlc_value));
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	if check_revoke_no_close {
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+		check_added_monitors!(nodes[0], 1);
+	}
+
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+		nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+		header.prev_blockhash = header.bitcoin_hash();
+	}
+	if !check_revoke_no_close {
+		test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
+		check_closed_broadcast!(nodes[0]);
+	} else {
+		let events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+				assert_eq!(payment_hash, our_payment_hash);
+				assert!(rejected_by_dest);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
+// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
+// There are only a few cases to test here:
+//  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
+//    broadcastable commitment transactions result in channel closure,
+//  * the HTLC is included in an unrevoked-but-previous remote commitment transaction,
+//  * the HTLC is included in the latest remote or local commitment transactions.
+// We test each of the three possible commitment transactions individually and use both dust and
+// non-dust HTLCs.
+// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
+// assume they are handled the same across all six cases, as both outbound and inbound failures are
+// tested for at least one of the cases in other tests.
 #[test]
-fn htlc_claim_local_commitment_only() {
+fn htlc_claim_single_commitment_only_a() {
 	do_htlc_claim_local_commitment_only(true);
 	do_htlc_claim_local_commitment_only(false);
+
+	do_htlc_claim_current_remote_commitment_only(true);
+	do_htlc_claim_current_remote_commitment_only(false);
+}
+
+#[test]
+fn htlc_claim_single_commitment_only_b() {
+	do_htlc_claim_previous_remote_commitment_only(true, false);
+	do_htlc_claim_previous_remote_commitment_only(false, false);
+	do_htlc_claim_previous_remote_commitment_only(true, true);
+	do_htlc_claim_previous_remote_commitment_only(false, true);
 }
 
 fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<msgs::HTLCFailChannelUpdate>)
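
As a sanity check on the CLTV arithmetic in the comment added above, the final
inequality can be verified numerically. The constant values below are
assumptions for illustration only; the real ones live elsewhere in the crate
and may differ:

    // Assumed values, for illustration only; not the crate's actual constants.
    const CLTV_CLAIM_BUFFER: u32 = 6;
    const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
    const CLTV_EXPIRY_DELTA: u32 = 72;

    fn main() {
        // HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
        // guarantees an outbound HTLC hits the chain at least CLTV_CLAIM_BUFFER
        // blocks before the corresponding inbound HTLC can be timed out on us.
        assert!(HTLC_FAIL_TIMEOUT_BLOCKS + 2 * CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA);
        println!("spare headroom: {} blocks",
            CLTV_EXPIRY_DELTA - (HTLC_FAIL_TIMEOUT_BLOCKS + 2 * CLTV_CLAIM_BUFFER));
    }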