X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmonitor.rs;h=60cb9c91583d604fa8fa464bd0b0f60790c48005;hb=c662dd3e946749f2c4a4c7e3169963d4e7906048;hp=8d6d23ad00a160b6342eade2dce0fffbf33ca06c;hpb=09831934d1a90fb793a0d0550d29c7b3c9a896b9;p=rust-lightning

diff --git a/src/ln/channelmonitor.rs b/src/ln/channelmonitor.rs
index 8d6d23ad..60cb9c91 100644
--- a/src/ln/channelmonitor.rs
+++ b/src/ln/channelmonitor.rs
@@ -39,6 +39,7 @@ use std::sync::{Arc,Mutex};
 use std::{hash,cmp};
 
 /// An error enum representing a failure to persist a channel monitor update.
+#[derive(Clone)]
 pub enum ChannelMonitorUpdateErr {
 	/// Used to indicate a temporary failure (eg connection to a watchtower failed, but is expected
 	/// to succeed at some point in the future).
@@ -47,6 +48,22 @@ pub enum ChannelMonitorUpdateErr {
 	/// submitting new commitment transactions to the remote party.
 	/// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
 	/// the channel to an operational state.
+	///
+	/// Note that continuing to operate when no copy of the updated ChannelMonitor could be
+	/// persisted is unsafe - if you failed to store the update on your own local disk, you should
+	/// instead return PermanentFailure to force closure of the channel ASAP.
+	///
+	/// Even when a channel has been "frozen", updates to the ChannelMonitor can continue to occur
+	/// (eg if an inbound HTLC which we forwarded was claimed upstream, resulting in us attempting
+	/// to claim it on this channel) and those updates must be applied wherever they can be. At
+	/// least one such updated ChannelMonitor must be persisted; otherwise PermanentFailure should
+	/// be returned to get things on-chain ASAP using only the in-memory copy. Obviously, updates to
+	/// the channel which would invalidate previous ChannelMonitors are not made when a channel has
+	/// been "frozen".
+	///
+	/// Note that even if updates made after TemporaryFailure succeed, you must still call
+	/// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
+	/// channel operation.
 	TemporaryFailure,
 	/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
 	/// different watchtower and cannot update with all watchtowers that were previously informed
@@ -155,7 +172,13 @@ impl ManyChannelMonitor for SimpleManyChannelMonitor {
 const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
 /// If an HTLC expires within this many blocks, force-close the channel to broadcast the
 /// HTLC-Success transaction.
-const CLTV_CLAIM_BUFFER: u32 = 6;
+/// In other words, this is an upper bound on how many blocks we think it can take us to get a
+/// transaction confirmed (and we use it in a few other, equivalent, places).
+pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
+/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
+/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
+/// copies of ChannelMonitors, including watchtowers).
+pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
 
 #[derive(Clone, PartialEq)]
 enum KeyStorage {
@@ -1184,16 +1207,7 @@ impl ChannelMonitor {
 			}
 		}
 		if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
-			let mut needs_broadcast = false;
-			for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() {
-				if htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER {
-					if htlc.offered || self.payment_preimages.contains_key(&htlc.payment_hash) {
-						needs_broadcast = true;
-					}
-				}
-			}
-
-			if needs_broadcast {
+			if self.would_broadcast_at_height(height) {
 				broadcaster.broadcast_transaction(&cur_local_tx.tx);
 				for tx in self.broadcast_by_local_state(&cur_local_tx) {
 					broadcaster.broadcast_transaction(&tx);
@@ -1206,10 +1220,29 @@ impl ChannelMonitor {
 	pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool {
 		if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
 			for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() {
-				if htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER {
-					if htlc.offered || self.payment_preimages.contains_key(&htlc.payment_hash) {
-						return true;
-					}
+				// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+				// chain with enough room to claim the HTLC without our counterparty being able to
+				// time out the HTLC first.
+				// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+				// concern is being able to claim the corresponding inbound HTLC (on another
+				// channel) before it expires. In fact, we don't even really care if our
+				// counterparty here claims such an outbound HTLC after it expired as long as we
+				// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+				// chain when our counterparty is waiting for expiration to off-chain fail an HTLC,
+				// we give ourselves a few blocks of headroom after expiration before going
+				// on-chain for an expired HTLC.
+				// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+				// from us until we've reached the point where we go on-chain with the
+				// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+				// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+				// aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
+				//     inbound_cltv == height + CLTV_CLAIM_BUFFER
+				//     outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+				//     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+				//     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+				if ( htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
+					(!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+					return true;
 				}
 			}
 		}
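
As a reading aid rather than part of the diff above, the sketch below illustrates the persistence contract the new ChannelMonitorUpdateErr documentation describes: failing to write even a local copy should surface as PermanentFailure, while a transiently unreachable watchtower copy should surface as TemporaryFailure followed later by ChannelManager::test_restore_channel_monitor. The persist_monitor_update helper and its two storage closures are hypothetical stand-ins; only the error variants and the restore call come from the patched code.

use std::io;

// Re-declared locally so the sketch stands alone; the real enum is the one
// documented in the diff above.
pub enum ChannelMonitorUpdateErr {
	TemporaryFailure,
	PermanentFailure,
}

// Hypothetical persistence hook: `write_local_copy` and `write_watchtower_copy`
// stand in for whatever storage a real ManyChannelMonitor implementation uses.
fn persist_monitor_update(
	write_local_copy: impl Fn() -> io::Result<()>,
	write_watchtower_copy: impl Fn() -> io::Result<()>,
) -> Result<(), ChannelMonitorUpdateErr> {
	// With no durable local copy it is unsafe to keep operating: return
	// PermanentFailure so the channel is force-closed ASAP using the in-memory copy.
	if write_local_copy().is_err() {
		return Err(ChannelMonitorUpdateErr::PermanentFailure);
	}
	// A temporarily unreachable watchtower is recoverable: return TemporaryFailure to
	// freeze the channel, keep applying claim-related updates to the local copy, and
	// retry via ChannelManager::test_restore_channel_monitor once the copy is synced.
	if write_watchtower_copy().is_err() {
		return Err(ChannelMonitorUpdateErr::TemporaryFailure);
	}
	Ok(())
}

fn main() {
	// Local write succeeds, watchtower write fails: the channel should be frozen, not closed.
	let res = persist_monitor_update(
		|| Ok(()),
		|| Err(io::Error::new(io::ErrorKind::Other, "watchtower offline")),
	);
	assert!(matches!(res, Err(ChannelMonitorUpdateErr::TemporaryFailure)));
}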
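
Similarly, here is a minimal, self-contained model of the timing rule derived in the new would_broadcast_at_height comment. The Htlc struct, the should_go_on_chain helper and the cltv_expiry_delta value are illustrative assumptions only; the real code iterates the commitment transaction's htlc_outputs and compares against the crate's CLTV_EXPIRY_DELTA, while the two constants and the branch conditions are taken directly from the diff.

// Constants as introduced in the diff above.
const CLTV_CLAIM_BUFFER: u32 = 6;        // blocks we allow ourselves to get a claim confirmed
const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3; // headroom for an off-chain fail after expiry

// Hypothetical, simplified view of an HTLC in our latest local commitment tx.
struct Htlc {
	offered: bool,       // true for outbound (offered by us), false for inbound
	cltv_expiry: u32,    // absolute block height at which the HTLC expires
	have_preimage: bool, // whether we know the payment preimage (inbound claimable)
}

/// Mirrors the new check: should we go on-chain for this HTLC at `height`?
fn should_go_on_chain(htlc: &Htlc, height: u32) -> bool {
	// Outbound: wait HTLC_FAIL_TIMEOUT_BLOCKS past expiry so the counterparty can
	// still fail it off-chain; inbound: go on-chain CLTV_CLAIM_BUFFER blocks before
	// expiry, but only if we can actually claim it.
	(htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height)
		|| (!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && htlc.have_preimage)
}

fn main() {
	// The inequality the comment derives: the per-hop CLTV delta must leave room for
	// the outbound claim to confirm at least CLTV_CLAIM_BUFFER blocks before we must
	// act on the corresponding inbound HTLC.
	let cltv_expiry_delta: u32 = 72; // illustrative per-hop delta, not the crate's value
	assert!(HTLC_FAIL_TIMEOUT_BLOCKS + 2 * CLTV_CLAIM_BUFFER <= cltv_expiry_delta);

	// An inbound HTLC we can claim, expiring at height 1000, triggers a broadcast
	// once the chain reaches height 994 (1000 - CLTV_CLAIM_BUFFER).
	let inbound = Htlc { offered: false, cltv_expiry: 1000, have_preimage: true };
	assert!(!should_go_on_chain(&inbound, 993));
	assert!(should_go_on_chain(&inbound, 994));

	// An outbound HTLC expiring at height 1000 only goes on-chain at height 1003
	// (1000 + HTLC_FAIL_TIMEOUT_BLOCKS), giving the counterparty time to fail it.
	let outbound = Htlc { offered: true, cltv_expiry: 1000, have_preimage: false };
	assert!(!should_go_on_chain(&outbound, 1002));
	assert!(should_go_on_chain(&outbound, 1003));
}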