Merge pull request #273 from ariard/2018-12-17-replace-by-permanent-channel-failure
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index d36945874fa6ce34d70a34e66f0b8a7fdac39092..64c7de855fe803b9b74032959669ce348189f6e3 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -22,7 +22,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
@@ -341,16 +341,17 @@ pub struct ChannelManager {
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 /// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
+const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
-// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
-// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
-// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
+// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
+// HTLC_FAIL_TIMEOUT_BLOCKS, then even after waiting HTLC_FAIL_ANTI_REORG_DELAY blocks for
+// reorg safety we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves
+// before hitting the CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
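
The tightened CLTV_EXPIRY_DELTA of 6 * 12 = 72 blocks must still clear the enlarged lower bound in CHECK_CLTV_EXPIRY_SANITY above. The assertion works because an underflow while evaluating a const is a compile error under #[deny(const_err)]. A minimal sketch of the same trick, with stand-in values for the ln/channelmonitor.rs constants (their actual values are not shown in this diff):

// Stand-in values; the real constants live in ln/channelmonitor.rs and are
// NOT taken from this diff.
const CLTV_CLAIM_BUFFER: u32 = 6;
const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;
const CLTV_EXPIRY_DELTA: u16 = 6 * 12;

// 72 - 2*3 - 6 - 6 = 54: fine. If CLTV_EXPIRY_DELTA ever dropped below the
// sum of the other three terms, this u32 subtraction would underflow, and
// #[deny(const_err)] turns the underflow into a hard compile error instead
// of a silent wrap.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32
	- 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
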
@@ -661,8 +662,7 @@ impl ChannelManager {
                        }
                };
                for htlc_source in failed_htlcs.drain(..) {
-                       // unknown_next_peer...I dunno who that is anymore....
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
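
Each hunk below makes the same substitution: HTLCs orphaned by a channel closure are now failed backwards with permanent_channel_failure (PERM | 8) rather than unknown_next_peer (PERM | 10), which better describes a channel that is gone for good. For reference, BOLT 4 composes its 16-bit onion failure codes from flag bits; the flag values in this sketch come from the spec, but the constant names are illustrative rather than identifiers from this file:

// BOLT 4 onion-failure flag bits (values per the spec; names illustrative).
const BADONION: u16 = 0x8000; // the processing node could not parse the onion
const PERM: u16 = 0x4000;     // permanent: retrying the same route is pointless
const NODE: u16 = 0x2000;     // failure concerns a node rather than a channel
const UPDATE: u16 = 0x1000;   // failure message carries a channel_update

// The two codes this commit swaps:
const UNKNOWN_NEXT_PEER: u16 = PERM | 10;        // 0x400a
const PERMANENT_CHANNEL_FAILURE: u16 = PERM | 8; // 0x4008
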
                let chan_update = if let Some(chan) = chan_option {
                        if let Ok(update) = self.get_channel_update(&chan) {
@@ -685,8 +685,7 @@ impl ChannelManager {
                let (local_txn, mut failed_htlcs) = shutdown_res;
                log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
-                       // unknown_next_peer...I dunno who that is anymore....
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                for tx in local_txn {
                        self.tx_broadcaster.broadcast_transaction(&tx);
@@ -1970,8 +1969,7 @@ impl ChannelManager {
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
-                       // unknown_next_peer...I dunno who that is anymore....
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                if let Some(chan) = chan_option {
                        if let Ok(update) = self.get_channel_update(&chan) {
@@ -2653,7 +2651,7 @@ impl events::MessageSendEventsProvider for ChannelManager {
                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
                                } else {
                                        log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                                }
                        }
                }
@@ -2678,7 +2676,7 @@ impl events::EventsProvider for ChannelManager {
                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
                                } else {
                                        log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                                }
                        }
                }
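
Both monitor-driven hunks above follow the same dispatch: each HTLC update surfaced by the ChannelMonitor either carries a preimage learned on-chain (claim backwards) or does not (fail backwards, now as a permanent channel failure). Schematically, with simplified stand-in names for the manager internals and locking elided here:

// Schematic only; helper names are stand-ins, not this file's identifiers.
for htlc_update in pending_htlc_updates.drain(..) {
	if let Some(preimage) = htlc_update.payment_preimage {
		// Downstream claimed on-chain, revealing the preimage: claim backwards.
		claim_funds_backwards(htlc_update.source, preimage);
	} else {
		// Downstream HTLC failed/timed out on-chain: fail backwards with
		// permanent_channel_failure (0x4000 | 8).
		fail_htlc_backwards(htlc_update.source, &htlc_update.payment_hash, 0x4000 | 8);
	}
}
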
@@ -4303,12 +4301,12 @@ mod tests {
                let mut nodes = Vec::new();
                let mut rng = thread_rng();
                let secp_ctx = Secp256k1::new();
-               let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
 
                let chan_count = Rc::new(RefCell::new(0));
                let payment_count = Rc::new(RefCell::new(0));
 
-               for _ in 0..node_count {
+               for i in 0..node_count {
+                       let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
                        let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
                        let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
                        let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
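
Giving each test node its own logger id makes interleaved multi-node test output attributable. A plausible shape for the id-tagged logger the hunk assumes (the actual TestLogger lives in util/test_utils.rs and may differ in detail):

// Sketch under assumption; see util/test_utils.rs for the real definition.
pub struct TestLogger {
	id: String,
}

impl TestLogger {
	pub fn new() -> TestLogger {
		Self::with_id("".to_owned())
	}
	pub fn with_id(id: String) -> TestLogger {
		TestLogger { id }
	}
}

impl Logger for TestLogger {
	fn log(&self, record: &Record) {
		// Prefix each line with the owning node's id so output from several
		// in-process nodes can be told apart.
		println!("{} {}", self.id, record.args);
	}
}
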