More regularly send an Error message when we force-close a channel
lightning/src/ln/channelmanager.rs
index de0ce53590dcebfad11af1ca78e11c93547a4ad2..14fed3c40aa24f487064ee4d33fdb54318d2040f 100644
@@ -39,6 +39,9 @@ use chain::Watch;
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
+// Since this struct is returned by the `list_channels` methods, expose it here in case users
+// want to construct one themselves.
+pub use ln::channel::CounterpartyForwardingInfo;
 use ln::channel::{Channel, ChannelError};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
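For context, the re-exported type is defined in ln/channel.rs. Below is a minimal sketch of its shape and a hand-constructed value; the field names and types are assumed from this era of the crate, so treat them as illustrative rather than authoritative:

    // Sketch only: the real definition lives in lightning::ln::channel and is
    // re-exported above. Fields assumed: a flat fee, a proportional fee, and
    // the CLTV delta the counterparty requires when forwarding to us.
    pub struct CounterpartyForwardingInfo {
        pub fee_base_msat: u32,               // flat fee, in millisatoshis
        pub fee_proportional_millionths: u32, // proportional fee, parts-per-million
        pub cltv_expiry_delta: u16,           // blocks of CLTV headroom required
    }

    let info = CounterpartyForwardingInfo {
        fee_base_msat: 1_000,
        fee_proportional_millionths: 100,
        cltv_expiry_delta: 144,
    };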
@@ -508,18 +511,28 @@ impl<'a> Drop for PersistenceNotifierGuard<'a> {
        }
 }
 
-/// The amount of time we require our counterparty wait to claim their money (ie time between when
-/// we, or our watchtower, must check for them having broadcast a theft transaction).
-pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
-/// The amount of time we're willing to wait to claim money back to us
-pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+/// The amount of time in blocks we require our counterparty to wait to claim their money (i.e.
+/// the window during which we, or our watchtower, must check for them having broadcast a theft transaction).
+///
+/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`].
+///
+/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
+pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+/// the maximum delay required by lnd as of March 2021.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
-/// ie the node we forwarded the payment on to should always have enough room to reliably time out
-/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
+/// HTLC's CLTV. The current default of 36 blocks represents roughly six hours at six blocks/hour.
+///
+/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`].
+///
+/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
+// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
+// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
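Both new constants act as floors on user configuration, as the doc comments above note. A minimal sketch of raising them through `UserConfig`, assuming the `util::config` layout of this era (`own_channel_config` for handshake settings, `channel_options` for per-channel settings):

    use lightning::util::config::{ChannelConfig, ChannelHandshakeConfig, UserConfig};

    let mut config = UserConfig::default();
    // Raise our required to_self_delay above BREAKDOWN_TIMEOUT (144 blocks);
    // per the docs above, values below the constant are not honored.
    config.own_channel_config = ChannelHandshakeConfig {
        our_to_self_delay: 6 * 24 * 2, // roughly two days of blocks
        ..Default::default()
    };
    // Raise our advertised CLTV delta above MIN_CLTV_EXPIRY_DELTA (36 blocks).
    config.channel_options = ChannelConfig {
        cltv_expiry_delta: 72,
        ..Default::default()
    };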
@@ -530,13 +543,13 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 // LATENCY_GRACE_PERIOD_BLOCKS.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 #[derive(Clone)]
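To see why the two checks above pass, plug in the crate's constants (values assumed here: LATENCY_GRACE_PERIOD_BLOCKS = 3, CLTV_CLAIM_BUFFER = 6, ANTI_REORG_DELAY = 6, and MIN_CLTV_EXPIRY_DELTA = 36 from the definition above). An unsigned const subtraction that underflows is a compile-time error under #[deny(const_err)], so each expression doubles as a static assertion:

    // CHECK_CLTV_EXPIRY_SANITY:   36 - 3 - 6 - 6 - 3 = 18  -> no underflow, build passes
    // CHECK_CLTV_EXPIRY_SANITY_2: 36 - 3 - 2*6       = 21  -> no underflow, build passes
    //
    // The same trick in miniature:
    #[deny(const_err)]
    #[allow(dead_code)]
    const STATIC_ASSERT_EXAMPLE: u32 = 36u32 - 3 - 6 - 6 - 3; // fails to compile if negative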
@@ -573,6 +586,10 @@ pub struct ChannelDetails {
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
        /// the peer is connected, and (c) no monitor update failure is pending resolution.
        pub is_live: bool,
+
+       /// Information on the fees and CLTV expiry delta that the counterparty requires when
+       /// forwarding payments to us through this channel.
+       pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
 }
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
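With the new field in place, consumers of `list_channels` can surface the counterparty's forwarding terms directly. A sketch, assuming a `channel_manager` binding in scope:

    for chan in channel_manager.list_channels() {
        if let Some(fwd) = chan.counterparty_forwarding_info {
            println!(
                "peer forwards to us for {} msat + {} ppm, cltv delta {}",
                fwd.fee_base_msat, fwd.fee_proportional_millionths, fwd.cltv_expiry_delta
            );
        }
    }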
@@ -891,6 +908,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
                                        is_live: channel.is_live(),
+                                       counterparty_forwarding_info: channel.counterparty_forwarding_info(),
                                });
                        }
                }
@@ -984,16 +1002,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
                                if let Some(node_id) = peer_node_id {
                                        if chan.get().get_counterparty_node_id() != *node_id {
-                                               // Error or Ok here doesn't matter - the result is only exposed publicly
-                                               // when peer_node_id is None anyway.
-                                               return Ok(());
+                                               return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                                        }
                                }
                                if let Some(short_id) = chan.get().get_short_channel_id() {
@@ -1013,14 +1029,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        });
                }
 
-               Ok(())
+               Ok(chan.get_counterparty_node_id())
        }
 
        /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-               self.force_close_channel_with_peer(channel_id, None)
+               match self.force_close_channel_with_peer(channel_id, None) {
+                       Ok(counterparty_node_id) => {
+                               self.channel_state.lock().unwrap().pending_msg_events.push(
+                                       events::MessageSendEvent::HandleError {
+                                               node_id: counterparty_node_id,
+                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                       msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+                                               },
+                                       }
+                               );
+                               Ok(())
+                       },
+                       Err(e) => Err(e)
+               }
        }
 
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
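The net effect of the change above: a user-initiated force-close now queues a HandleError message event, which a PeerManager (or any other MessageSendEventsProvider consumer) delivers to the peer as a wire-level error. A sketch of observing it directly, assuming `channel_manager` and `channel_id` bindings in scope; in normal operation the PeerManager drains these events itself, so this is for illustration only:

    use lightning::ln::msgs::ErrorAction;
    use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider};

    channel_manager.force_close_channel(&channel_id).expect("unknown channel_id");
    for event in channel_manager.get_and_clear_pending_msg_events() {
        if let MessageSendEvent::HandleError {
            node_id,
            action: ErrorAction::SendErrorMessage { msg },
        } = event {
            // msg.data is "Channel force-closed" per the code above.
            println!("would send error to {}: {}", node_id, msg.data);
        }
    }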
@@ -1258,7 +1287,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
                                                break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
                                        }
-                                       if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+                                       if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
                                                break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
                                        }
                                        let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
@@ -1316,7 +1345,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        short_channel_id,
                        timestamp: chan.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
-                       cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+                       cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
                        htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
                        fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
@@ -2993,6 +3022,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
+       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
+               let channel_state = &mut *channel_state_lock;
+               let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
+                       Some(chan_id) => chan_id.clone(),
+                       None => {
+                               // It's not a local channel
+                               return Ok(())
+                       }
+               };
+               match channel_state.by_id.entry(chan_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                       // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+                               }
+                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                       },
+                       hash_map::Entry::Vacant(_) => unreachable!()
+               }
+               Ok(())
+       }
+
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
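The new internal_channel_update above defers to Channel::channel_update, which this patch adds in ln/channel.rs (not shown in this hunk). A plausible sketch of that counterpart, assumed rather than quoted: it records the counterparty's forwarding terms so that get_channel_update and list_channels can report them later:

    // Assumed shape of the channel-side handler in ln/channel.rs:
    pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
        self.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
            fee_base_msat: msg.contents.fee_base_msat,
            fee_proportional_millionths: msg.contents.fee_proportional_millionths,
            cltv_expiry_delta: msg.contents.cltv_expiry_delta,
        });
        Ok(())
    }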
@@ -3153,6 +3205,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        msg: update
                                                                });
                                                        }
+                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                               node_id: chan.get_counterparty_node_id(),
+                                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                               },
+                                                       });
                                                }
                                        },
                                }
@@ -3261,6 +3319,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 
+               assert_eq!(*self.last_block_hash.read().unwrap(), header.prev_blockhash,
+                       "Blocks must be connected in chain-order - the connected header must build on the last connected header");
+               assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
+                       "Blocks must be connected in chain-order - the connected header must build on the last connected header");
                self.latest_block_height.store(height as usize, Ordering::Release);
                *self.last_block_hash.write().unwrap() = block_hash;
 
@@ -3319,6 +3381,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                msg: update
                                                                        });
                                                                }
+                                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                       node_id: channel.get_counterparty_node_id(),
+                                                                       action: msgs::ErrorAction::SendErrorMessage {
+                                                                               msg: msgs::ErrorMessage { channel_id: channel.channel_id(), data: "Commitment or closing transaction was confirmed on chain.".to_owned() }
+                                                                       },
+                                                               });
                                                                return false;
                                                        }
                                                }
@@ -3377,7 +3445,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                // See the docs for `ChannelManagerReadArgs` for more.
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 
-               self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
+               assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
+                       "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+               let new_height = self.latest_block_height.fetch_sub(1, Ordering::AcqRel) as u32 - 1;
                *self.last_block_hash.write().unwrap() = header.prev_blockhash;
 
                let mut failed_channels = Vec::new();
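The assertions added in the two hunks above, in block_connected and block_disconnected, make the ordering contract explicit: headers must be connected strictly in chain order, and disconnected strictly in reverse. A sketch of a conforming caller, where `blocks` is a hypothetical in-order list of (header, txdata, height) tuples matching the connect/disconnect signatures of this era:

    // Connect forward, each header building on the previous one.
    for (header, txdata, height) in blocks.iter() {
        channel_manager.block_connected(header, txdata, *height);
    }
    // On a reorg, disconnect in exact reverse order before reconnecting.
    for (header, _txdata, _height) in blocks.iter().rev() {
        channel_manager.block_disconnected(header);
    }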
@@ -3386,8 +3456,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let channel_state = &mut *channel_lock;
                        let short_to_id = &mut channel_state.short_to_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
-                       channel_state.by_id.retain(|_,  v| {
-                               if v.block_disconnected(header) {
+                       channel_state.by_id.retain(|channel_id,  v| {
+                               if v.block_disconnected(header, new_height) {
                                        if let Some(short_id) = v.get_short_channel_id() {
                                                short_to_id.remove(&short_id);
                                        }
@@ -3397,6 +3467,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        msg: update
                                                });
                                        }
+                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                               node_id: v.get_counterparty_node_id(),
+                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                       msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Funding transaction was un-confirmed.".to_owned() }
+                                               },
+                                       });
                                        false
                                } else {
                                        true
@@ -3516,6 +3592,11 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sy
                let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
        }
 
+       fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
+               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+               let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+       }
+
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
                let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
@@ -3595,6 +3676,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sy
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                        &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+                                       &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
                                }
                        });
                }