Fix XXXs that slipped into router and handle HTLCFailChannelUpdates
diff --git a/src/ln/channelmonitor.rs b/src/ln/channelmonitor.rs
index 4b72a3cb69d1c3436048d050c649c3ee1fae350b..60cb9c91583d604fa8fa464bd0b0f60790c48005 100644
@@ -39,6 +39,7 @@ use std::sync::{Arc,Mutex};
 use std::{hash,cmp};
 
 /// An error enum representing a failure to persist a channel monitor update.
+#[derive(Clone)]
 pub enum ChannelMonitorUpdateErr {
        /// Used to indicate a temporary failure (eg connection to a watchtower failed, but is expected
        /// to succeed at some point in the future).
@@ -47,6 +48,22 @@ pub enum ChannelMonitorUpdateErr {
        /// submitting new commitment transactions to the remote party.
        /// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
        /// the channel to an operational state.
+       ///
+       /// Note that continuing to operate when no copy of the updated ChannelMonitor could be
+       /// persisted is unsafe - if you failed to store the update on your own local disk, you should
+       /// instead return PermanentFailure to force closure of the channel ASAP.
+       ///
+       /// Even when a channel has been "frozen", updates to the ChannelMonitor can continue to occur
+       /// (eg if an inbound HTLC which we forwarded was claimed upstream, resulting in us attempting
+       /// to claim it on this channel) and those updates must be applied wherever they can be. At
+       /// least one such updated ChannelMonitor must be persisted, otherwise PermanentFailure should
+       /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously, updates
+       /// to the channel which would invalidate previous ChannelMonitors are not made when a channel
+       /// has been "frozen".
+       ///
+       /// Note that even if updates made after TemporaryFailure succeed, you must still call
+       /// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
+       /// channel operation.
        TemporaryFailure,
        /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
        /// different watchtower and cannot update with all watchtowers that were previously informed
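
To make the new TemporaryFailure contract concrete, here is a minimal standalone
sketch of how a persister might use it (this mirrors the enum above rather than
pulling in the real crate; push_to_watchtower and write_local_copy are
hypothetical stand-ins for real I/O):

#[derive(Clone)]
enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

/// Hypothetical persister: push to a watchtower, falling back to local disk.
fn persist_monitor(serialized: &[u8]) -> Result<(), ChannelMonitorUpdateErr> {
    if push_to_watchtower(serialized).is_err() {
        // No local copy either: per the docs above, the only safe answer is
        // PermanentFailure, forcing the channel on-chain ASAP.
        if write_local_copy(serialized).is_err() {
            return Err(ChannelMonitorUpdateErr::PermanentFailure);
        }
        // We have a persisted copy, but not everywhere we need one: freeze the
        // channel and retry later.
        return Err(ChannelMonitorUpdateErr::TemporaryFailure);
    }
    Ok(())
}

// Stubs standing in for real I/O:
fn push_to_watchtower(_: &[u8]) -> Result<(), ()> { Err(()) }
fn write_local_copy(_: &[u8]) -> Result<(), ()> { Ok(()) }

fn main() {
    if let Err(ChannelMonitorUpdateErr::TemporaryFailure) = persist_monitor(b"monitor bytes") {
        // The channel is now frozen; once the watchtower is reachable again the
        // user calls ChannelManager::test_restore_channel_monitor to re-sync the
        // latest monitor and re-enable normal channel operation.
    }
}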
@@ -155,7 +172,13 @@ impl ManyChannelMonitor for SimpleManyChannelMonitor<OutPoint> {
 const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
 /// If an HTLC expires within this many blocks, force-close the channel to broadcast the
 /// HTLC-Success transaction.
-const CLTV_CLAIM_BUFFER: u32 = 6;
+/// In other words, this is an upper bound on how many blocks we think it can take us to get a
+/// transaction confirmed (and we use it in a few other, equivalent places).
+pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
+/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
+/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
+/// copies of ChannelMonitors, including watchtowers).
+pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
 
 #[derive(Clone, PartialEq)]
 enum KeyStorage {
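
As a sanity check on how these two constants interact, the inequality derived in
would_broadcast_at_height below must hold against the cltv_expiry_delta a
forwarding node advertises. A standalone sketch; the CLTV_EXPIRY_DELTA value here
is an assumption for illustration and must match whatever the router actually
advertises:

const CLTV_CLAIM_BUFFER: u32 = 6;
const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
const CLTV_EXPIRY_DELTA: u32 = 72; // assumed for illustration

fn main() {
    // Derived below: HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA,
    // i.e. the delta between an inbound and outbound HTLC must leave room both to
    // off-chain fail the outbound after expiry and to claim the inbound on-chain.
    assert!(HTLC_FAIL_TIMEOUT_BLOCKS + 2 * CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA);
    println!("{} + 2*{} = {} <= {}", HTLC_FAIL_TIMEOUT_BLOCKS, CLTV_CLAIM_BUFFER,
             HTLC_FAIL_TIMEOUT_BLOCKS + 2 * CLTV_CLAIM_BUFFER, CLTV_EXPIRY_DELTA);
}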
@@ -759,7 +782,7 @@ impl ChannelMonitor {
                                        ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &per_commitment_point, &htlc_base_key)))
                                },
                        };
-                       let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.delayed_payment_base_key));
+                       let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.their_delayed_payment_base_key.unwrap()));
                        let a_htlc_key = match self.their_htlc_base_key {
                                None => return (txn_to_broadcast, (commitment_txid, watch_outputs)),
                                Some(their_htlc_base_key) => ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &their_htlc_base_key)),
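
The one-line fix above matters because, per BOLT 3, the delayed key in the
counterparty's commitment transaction is derived from *their* delayed_payment
basepoint, not ours (note that a_htlc_key just below already correctly uses
their_htlc_base_key). For reference, the derivation chan_utils::derive_public_key
performs is pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G; a
standalone sketch against a recent rust-secp256k1 (the tweak API has been renamed
across crate versions, so treat this as illustrative):

use bitcoin_hashes::{sha256, Hash, HashEngine};
use secp256k1::{PublicKey, Scalar, Secp256k1, Verification};

// BOLT 3: pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G
fn derive_public_key<C: Verification>(
    secp_ctx: &Secp256k1<C>,
    per_commitment_point: &PublicKey,
    base_point: &PublicKey,
) -> Result<PublicKey, secp256k1::Error> {
    let mut engine = sha256::Hash::engine();
    engine.input(&per_commitment_point.serialize());
    engine.input(&base_point.serialize());
    let tweak = Scalar::from_be_bytes(sha256::Hash::from_engine(engine).to_byte_array())
        .expect("hash output is a valid scalar with overwhelming probability");
    // Add tweak*G to the basepoint to get the per-commitment public key.
    base_point.add_exp_tweak(secp_ctx, &tweak)
}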
@@ -856,7 +879,7 @@ impl ChannelMonitor {
                                                };
                                                let sighash_parts = bip143::SighashComponents::new(&single_htlc_tx);
                                                sign_input!(sighash_parts, single_htlc_tx.input[0], Some(idx), htlc.amount_msat / 1000);
-                                               txn_to_broadcast.push(single_htlc_tx); // TODO: This is not yet tested in ChannelManager!
+                                               txn_to_broadcast.push(single_htlc_tx);
                                        }
                                }
                        }
@@ -1184,16 +1207,7 @@ impl ChannelMonitor {
                        }
                }
                if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
-                       let mut needs_broadcast = false;
-                       for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() {
-                               if htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER {
-                                       if htlc.offered || self.payment_preimages.contains_key(&htlc.payment_hash) {
-                                               needs_broadcast = true;
-                                       }
-                               }
-                       }
-
-                       if needs_broadcast {
+                       if self.would_broadcast_at_height(height) {
                                broadcaster.broadcast_transaction(&cur_local_tx.tx);
                                for tx in self.broadcast_by_local_state(&cur_local_tx) {
                                        broadcaster.broadcast_transaction(&tx);
@@ -1206,10 +1220,29 @@ impl ChannelMonitor {
        pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool {
                if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
                        for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() {
-                               if htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER {
-                                       if htlc.offered || self.payment_preimages.contains_key(&htlc.payment_hash) {
-                                               return true;
-                                       }
+                               // For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+                               // chain with enough room to claim the HTLC without our counterparty being able to
+                               // time out the HTLC first.
+                               // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+                               // concern is being able to claim the corresponding inbound HTLC (on another
+                               // channel) before it expires. In fact, we don't even really care if our
+                               // counterparty here claims such an outbound HTLC after it expired as long as we
+                               // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+                               // chain when our counterparty is waiting for expiration to off-chain fail an HTLC
+                               // we give ourselves a few blocks of headroom after expiration before going
+                               // on-chain for an expired HTLC.
+                               // Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+                               // from us until we've reached the point where we go on-chain with the
+                               // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+                               // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+                               //  aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
+                               //      inbound_cltv == height + CLTV_CLAIM_BUFFER
+                               //      outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+                               //      HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+                               //      HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+                               if ( htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
+                                  (!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+                                       return true;
                                }
                        }
                }
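
Pulling the new branch condition out of the diff, the decision reduces to the
following standalone sketch (the Htlc struct and have_preimage field are
stand-ins for the real per-commitment HTLC data and the payment_preimages map):

const CLTV_CLAIM_BUFFER: u32 = 6;
const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;

struct Htlc {
    offered: bool,       // true for HTLCs we offered (outbound)
    cltv_expiry: u32,
    have_preimage: bool, // stand-in for payment_preimages.contains_key(&payment_hash)
}

fn would_broadcast_at_height(htlcs: &[Htlc], height: u32) -> bool {
    htlcs.iter().any(|htlc| {
        // Outbound: wait HTLC_FAIL_TIMEOUT_BLOCKS past expiry so the counterparty
        // can still fail the HTLC back off-chain before we go to chain.
        (htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
        // Inbound with a known preimage: go to chain CLTV_CLAIM_BUFFER blocks
        // before expiry so we can claim before the counterparty can time it out.
        (!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && htlc.have_preimage)
    })
}

fn main() {
    let htlcs = [Htlc { offered: true, cltv_expiry: 100, have_preimage: false }];
    assert!(!would_broadcast_at_height(&htlcs, 102)); // expired, but within the grace period
    assert!(would_broadcast_at_height(&htlcs, 103));  // 100 + 3 <= 103: go on-chain
}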