Add constant for HTLC failure anti-reorg delay
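In rust-lightning's ChannelManager, the sanity check on CLTV_EXPIRY_DELTA must now also leave room for HTLC_FAIL_ANTI_REORG_DELAY, so that constant is pulled in from ln::channelmonitor and subtracted in CHECK_CLTV_EXPIRY_SANITY; the delta itself is tightened from two days (6 * 24 * 2) to twelve hours (6 * 12) of blocks. The same commit adds trace-level logging around channel closure, force-closure, HTLC failure and block connection, and gives each test node its own TestLogger id.

The check relies on u32 underflow in a const context: if the delta ever drops below the sum of the buffers, the subtraction underflows and #[deny(const_err)] turns that into a compile error. A minimal sketch of the idea, with illustrative buffer values (the real constants live in ln::channelmonitor and may differ):

    // Assumed, illustrative values -- not the actual channelmonitor constants.
    const CLTV_CLAIM_BUFFER: u32 = 6;
    const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 6;
    const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;
    const CLTV_EXPIRY_DELTA: u16 = 6 * 12;

    // 72 - 12 - 6 - 6 = 48, so this compiles; lower CLTV_EXPIRY_DELTA below 24
    // and the u32 subtraction underflows, which #[deny(const_err)] rejects.
    #[deny(const_err)]
    #[allow(dead_code)]
    const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32
            - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;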
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index f349afc18d07339843ca9a2eb5ccbd159f9de604..ccfb9f776973a217b812fbdf59eb8058bee0c96a 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -22,7 +22,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
@@ -341,16 +341,17 @@ pub struct ChannelManager {
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 /// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
+const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
-// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
-// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
-// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
+// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
+// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
+// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
+// on-chain to time out the HTLC.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMontior::would_broadcast_at_height for a description of why this is needed.
@@ -423,6 +424,7 @@ macro_rules! break_chan_entry {
                                break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
                        },
                        Err(ChannelError::Close(msg)) => {
+                               log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
                                let (channel_id, mut chan) = $entry.remove_entry();
                                if let Some(short_id) = chan.get_short_channel_id() {
                                        $channel_state.short_to_id.remove(&short_id);
@@ -441,6 +443,7 @@ macro_rules! try_chan_entry {
                                return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
                        },
                        Err(ChannelError::Close(msg)) => {
+                               log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
                                let (channel_id, mut chan) = $entry.remove_entry();
                                if let Some(short_id) = chan.get_short_channel_id() {
                                        $channel_state.short_to_id.remove(&short_id);
@@ -681,6 +684,7 @@ impl ChannelManager {
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
                let (local_txn, mut failed_htlcs) = shutdown_res;
+               log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        // unknown_next_peer...I dunno who that is anymore....
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
@@ -707,6 +711,7 @@ impl ChannelManager {
                                return;
                        }
                };
+               log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown());
                if let Ok(update) = self.get_channel_update(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
@@ -1552,6 +1557,7 @@ impl ChannelManager {
        fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
                match source {
                        HTLCSource::OutboundRoute { .. } => {
+                               log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                mem::drop(channel_state_lock);
                                if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
                                        let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
@@ -1577,10 +1583,12 @@ impl ChannelManager {
                        HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
                                let err_packet = match onion_error {
                                        HTLCFailReason::Reason { failure_code, data } => {
+                                               log_trace!(self, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
                                                let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
                                                ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
                                        },
                                        HTLCFailReason::ErrorPacket { err } => {
+                                               log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built ErrorPacket", log_bytes!(payment_hash.0));
                                                ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
                                        }
                                };
@@ -2642,8 +2650,10 @@ impl events::MessageSendEventsProvider for ChannelManager {
                        //TODO: This behavior should be documented.
                        for htlc_update in self.monitor.fetch_pending_htlc_updated() {
                                if let Some(preimage) = htlc_update.payment_preimage {
+                                       log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
                                } else {
+                                       log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
                                }
                        }
@@ -2665,8 +2675,10 @@ impl events::EventsProvider for ChannelManager {
                        //TODO: This behavior should be documented.
                        for htlc_update in self.monitor.fetch_pending_htlc_updated() {
                                if let Some(preimage) = htlc_update.payment_preimage {
+                                       log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
                                } else {
+                                       log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
                                }
                        }
@@ -2681,6 +2693,8 @@ impl events::EventsProvider for ChannelManager {
 
 impl ChainListener for ChannelManager {
        fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
+               let header_hash = header.bitcoin_hash();
+               log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
                let _ = self.total_consistency_lock.read().unwrap();
                let mut failed_channels = Vec::new();
                {
@@ -2713,6 +2727,7 @@ impl ChainListener for ChannelManager {
                                        for tx in txn_matched {
                                                for inp in tx.input.iter() {
                                                        if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+                                                               log_trace!(self, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
                                                                if let Some(short_id) = channel.get_short_channel_id() {
                                                                        short_to_id.remove(&short_id);
                                                                }
@@ -2753,7 +2768,7 @@ impl ChainListener for ChannelManager {
                        self.finish_force_close_channel(failure);
                }
                self.latest_block_height.store(height as usize, Ordering::Release);
-               *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
+               *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
        }
 
        /// We force-close the channel without letting our counterparty participate in the shutdown
@@ -4289,12 +4304,12 @@ mod tests {
                let mut nodes = Vec::new();
                let mut rng = thread_rng();
                let secp_ctx = Secp256k1::new();
-               let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
 
                let chan_count = Rc::new(RefCell::new(0));
                let payment_count = Rc::new(RefCell::new(0));
 
-               for _ in 0..node_count {
+               for i in 0..node_count {
+                       let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
                        let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
                        let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
                        let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});