diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 450d0140..31c8dcca 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -39,6 +39,9 @@ use chain::Watch;
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
+// Since this struct is returned in `list_channels` methods, expose it here in case users want to
+// construct one themselves.
+pub use ln::channel::CounterpartyForwardingInfo;
 use ln::channel::{Channel, ChannelError};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
@@ -480,10 +483,11 @@ pub struct ChainParameters {
 }
 
 /// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
-/// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
-/// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
-/// upon going out of scope, sending the aforementioned notification (since the lock being released
-/// indicates that the updates are ready for persistence).
+/// desirable to notify any listeners on `await_persistable_update_timeout`/
+/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// struct is responsible for locking the total consistency lock and, upon going out of scope,
+/// sending the aforementioned notification (since the lock being released indicates that the
+/// updates are ready for persistence).
 struct PersistenceNotifierGuard<'a> {
 	persistence_notifier: &'a PersistenceNotifier,
 	// We hold onto this result so the lock doesn't get released immediately.
@@ -507,18 +511,28 @@ impl<'a> Drop for PersistenceNotifierGuard<'a> {
 	}
 }
 
-/// The amount of time we require our counterparty wait to claim their money (ie time between when
-/// we, or our watchtower, must check for them having broadcast a theft transaction).
-pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
-/// The amount of time we're willing to wait to claim money back to us
-pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+/// The amount of time in blocks we require our counterparty to wait to claim their money (i.e.
+/// the time during which we, or our watchtower, must check for them having broadcast a theft
+/// transaction).
+///
+/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`].
+///
+/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
+pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+/// the maximum required amount in lnd as of March 2021.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
-/// ie the node we forwarded the payment on to should always have enough room to reliably time out
-/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
+/// HTLC's CLTV. The current default represents roughly six hours of blocks at six blocks/hour.
+///
+/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`].
+///
+/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
+// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
+// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
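Both constants are now tied to the user-facing config structs linked in the doc comments above. A minimal sketch of raising them; the `UserConfig` field paths (`own_channel_config`, `channel_options`) are assumptions based on `util::config` at this revision:

```rust
use lightning::util::config::UserConfig;

// A sketch under the assumptions above, not the crate's documented example.
let mut config = UserConfig::default();
// Require our counterparty to wait two weeks of blocks instead of the
// BREAKDOWN_TIMEOUT default of one day (6 blocks/hour * 24 hours).
config.own_channel_config.our_to_self_delay = 6 * 24 * 14;
// Ask for twice MIN_CLTV_EXPIRY_DELTA (36 blocks) on HTLCs we forward; per
// the docs above this can only be raised, never lowered below the minimum.
config.channel_options.cltv_expiry_delta = 6 * 12;
```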
@@ -529,13 +543,13 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 // LATENCY_GRACE_PERIOD_BLOCKS.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 #[derive(Clone)]
@@ -572,6 +586,10 @@ pub struct ChannelDetails {
 	/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
 	/// the peer is connected, and (c) no monitor update failure is pending resolution.
 	pub is_live: bool,
+
+	/// Information on the fees and requirements that the counterparty imposes when forwarding
+	/// payments to us through this channel.
+	pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
 }
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@ -890,6 +908,7 @@ impl ChannelMana
 				outbound_capacity_msat,
 				user_id: channel.get_user_id(),
 				is_live: channel.is_live(),
+				counterparty_forwarding_info: channel.counterparty_forwarding_info(),
 			});
 		}
 	}
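With `ChannelDetails` now carrying the counterparty's forwarding parameters, a sketch of reading them; `manager` is assumed to be an initialized `ChannelManager`, and the `CounterpartyForwardingInfo` field names (`fee_base_msat`, `fee_proportional_millionths`, `cltv_expiry_delta`) are assumptions mirroring the `channel_update` contents:

```rust
for chan in manager.list_channels() {
    match chan.counterparty_forwarding_info {
        Some(info) => println!(
            "peer forwards to us for {} msat + {} ppm, cltv_expiry_delta {}",
            info.fee_base_msat, info.fee_proportional_millionths, info.cltv_expiry_delta,
        ),
        // Remains None until we have processed a channel_update from the peer.
        None => println!("no channel_update received from peer yet"),
    }
}
```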
@@ -983,16 +1002,14 @@
 		}
 	}
 
-	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
+	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
 		let mut chan = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
 			if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
 				if let Some(node_id) = peer_node_id {
 					if chan.get().get_counterparty_node_id() != *node_id {
-						// Error or Ok here doesn't matter - the result is only exposed publicly
-						// when peer_node_id is None anyway.
-						return Ok(());
+						return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
 					}
 				}
 				if let Some(short_id) = chan.get().get_short_channel_id() {
@@ -1012,14 +1029,27 @@
 			});
 		}
 
-		Ok(())
+		Ok(chan.get_counterparty_node_id())
 	}
 
 	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
 	/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
 	pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-		self.force_close_channel_with_peer(channel_id, None)
+		match self.force_close_channel_with_peer(channel_id, None) {
+			Ok(counterparty_node_id) => {
+				self.channel_state.lock().unwrap().pending_msg_events.push(
+					events::MessageSendEvent::HandleError {
+						node_id: counterparty_node_id,
+						action: msgs::ErrorAction::SendErrorMessage {
+							msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+						},
+					}
+				);
+				Ok(())
+			},
+			Err(e) => Err(e)
+		}
 	}
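A sketch of the caller-visible behavior after this change, assuming `manager` and a `channel_id: [u8; 32]` are in scope; an unknown channel (and now also a `peer_node_id` mismatch, which was previously a silent `Ok(())`) surfaces as `APIError::ChannelUnavailable`:

```rust
use lightning::util::errors::APIError;

match manager.force_close_channel(&channel_id) {
    // On success the latest local commitment transaction is broadcast and,
    // after this patch, an error message is also queued for the peer.
    Ok(()) => {},
    Err(APIError::ChannelUnavailable { err }) => {
        eprintln!("force-close failed: {}", err); // e.g. "No such channel"
    },
    Err(e) => eprintln!("unexpected API error: {:?}", e),
}
```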
 
 	/// Force close all channels, immediately broadcasting the latest local commitment transaction
@@ -1257,7 +1287,7 @@ impl ChannelMana
 					if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
 						break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
 					}
-					if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+					if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
 						break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 					}
 					let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
@@ -1315,7 +1345,7 @@ impl ChannelMana
 			short_channel_id,
 			timestamp: chan.get_update_time_counter(),
 			flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
-			cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+			cltv_expiry_delta: chan.get_cltv_expiry_delta(),
 			htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
 			htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
 			fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
@@ -2992,6 +3022,29 @@ impl ChannelMana
 		Ok(())
 	}
 
+	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+		let mut channel_state_lock = self.channel_state.lock().unwrap();
+		let channel_state = &mut *channel_state_lock;
+		let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
+			Some(chan_id) => chan_id.clone(),
+			None => {
+				// It's not a local channel
+				return Ok(())
+			}
+		};
+		match channel_state.by_id.entry(chan_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+					// TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
+					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+				}
+				try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+			},
+			hash_map::Entry::Vacant(_) => unreachable!()
+		}
+		Ok(())
+	}
+
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
@@ -3152,6 +3205,12 @@ impl ChannelMana
 						msg: update
 					});
 				}
+				pending_msg_events.push(events::MessageSendEvent::HandleError {
+					node_id: chan.get_counterparty_node_id(),
+					action: msgs::ErrorAction::SendErrorMessage {
+						msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+					},
+				});
 			}
 		},
 	}
@@ -3234,12 +3293,26 @@ where
 	L::Target: Logger,
 {
 	fn block_connected(&self, block: &Block, height: u32) {
+		assert_eq!(*self.last_block_hash.read().unwrap(), block.header.prev_blockhash,
+			"Blocks must be connected in chain-order - the connected header must build on the last connected header");
+		assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
+			"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
 		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
-		ChannelManager::block_connected(self, &block.header, &txdata, height);
+		self.transactions_confirmed(&block.header, height, &txdata);
+		self.update_best_block(&block.header, height);
 	}
 
-	fn block_disconnected(&self, header: &BlockHeader, _height: u32) {
-		ChannelManager::block_disconnected(self, header);
+	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+		assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
+			"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+
+		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+		let new_height = self.latest_block_height.fetch_sub(1, Ordering::AcqRel) as u32 - 1;
+		assert_eq!(new_height, height - 1,
+			"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
+		*self.last_block_hash.write().unwrap() = header.prev_blockhash;
+
+		self.do_chain_event(new_height, |channel| channel.update_best_block(new_height, header.time));
 	}
 }
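The new assertions make the ordering contract explicit: each connected block must build directly on the last connected header. A sketch of a compliant caller, assuming the surrounding impl is of the `chain::Listen` trait:

```rust
use bitcoin::blockdata::block::Block;
use lightning::chain::Listen;

// Feed (block, height) pairs strictly in chain order; the assertions above
// panic if a header does not build on the previously connected one.
fn connect_blocks<L: Listen>(listener: &L, blocks: Vec<(Block, u32)>) {
    for (block, height) in blocks {
        listener.block_connected(&block, height);
    }
}
```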
@@ -3250,18 +3323,11 @@ impl ChannelMana
 	F::Target: FeeEstimator,
 	L::Target: Logger,
 {
-	/// Updates channel state based on transactions seen in a connected block.
-	pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+	fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+			(&self, height: u32, f: FN) {
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// during initialization prior to the chain_monitor being fully configured in some cases.
 		// See the docs for `ChannelManagerReadArgs` for more.
-		let block_hash = header.block_hash();
-		log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
-
-		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
-		self.latest_block_height.store(height as usize, Ordering::Release);
-		*self.last_block_hash.write().unwrap() = block_hash;
 
 		let mut failed_channels = Vec::new();
 		let mut timed_out_htlcs = Vec::new();
@@ -3271,7 +3337,7 @@ impl ChannelMana
 			let short_to_id = &mut channel_state.short_to_id;
 			let pending_msg_events = &mut channel_state.pending_msg_events;
 			channel_state.by_id.retain(|_, channel| {
-				let res = channel.block_connected(header, txdata, height);
+				let res = f(channel);
 				if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
 					for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
 						let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
@@ -3297,32 +3363,23 @@ impl ChannelMana
 						short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
 					}
 				} else if let Err(e) = res {
+					if let Some(short_id) = channel.get_short_channel_id() {
+						short_to_id.remove(&short_id);
+					}
+					// It looks like our counterparty went on-chain or the funding transaction was
+					// reorged out of the main chain. Close the channel.
+					failed_channels.push(channel.force_shutdown(true));
+					if let Ok(update) = self.get_channel_update(&channel) {
+						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+							msg: update
+						});
+					}
 					pending_msg_events.push(events::MessageSendEvent::HandleError {
 						node_id: channel.get_counterparty_node_id(),
 						action: msgs::ErrorAction::SendErrorMessage { msg: e },
 					});
 					return false;
 				}
-				if let Some(funding_txo) = channel.get_funding_txo() {
-					for &(_, tx) in txdata.iter() {
-						for inp in tx.input.iter() {
-							if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
-								log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
-								if let Some(short_id) = channel.get_short_channel_id() {
-									short_to_id.remove(&short_id);
-								}
-								// It looks like our counterparty went on-chain. Close the channel.
-								failed_channels.push(channel.force_shutdown(true));
-								if let Ok(update) = self.get_channel_update(&channel) {
-									pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-										msg: update
-									});
-								}
-								return false;
-							}
-						}
-					}
-				}
 				true
 			});
 
@@ -3351,6 +3408,64 @@ impl ChannelMana
 		for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
 		}
+	}
+
+	/// Updates channel state to take note of transactions which were confirmed in the given block
+	/// at the given height.
+	///
+	/// Note that you must still call (or have called) [`update_best_block`] with the block
+	/// information which is included here.
+	///
+	/// This method may be called before or after [`update_best_block`] for a given block's
+	/// transaction data and may be called multiple times with additional transaction data for a
+	/// given block.
+	///
+	/// This method may be called for a previous block after an [`update_best_block`] call has
+	/// been made for a later block; however, it must *not* be called with transaction data from a
+	/// block which is no longer in the best chain (i.e. where [`update_best_block`] has already
+	/// been informed about a blockchain reorganization which no longer includes the block which
+	/// corresponds to `header`).
+	///
+	/// [`update_best_block`]: `Self::update_best_block`
+	pub fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &TransactionData) {
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// during initialization prior to the chain_monitor being fully configured in some cases.
+		// See the docs for `ChannelManagerReadArgs` for more.
+
+		let block_hash = header.block_hash();
+		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
+
+		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+		self.do_chain_event(height, |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
+	}
+
+	/// Updates channel state with the current best blockchain tip. You should attempt to call this
+	/// quickly after a new block becomes available; however, if multiple new blocks become
+	/// available at the same time, only a single `update_best_block()` call needs to be made.
+	///
+	/// This method should also be called immediately after any block disconnections, once at the
+	/// reorganization fork point, and once with the new chain tip. Calling this method at the
+	/// blockchain reorganization fork point ensures we learn when a funding transaction which was
+	/// previously confirmed is reorganized out of the blockchain, ensuring we do not continue to
+	/// accept payments which cannot be enforced on-chain.
+	///
+	/// In both the block-connection and block-disconnection case, this method may be called either
+	/// once per block connected or disconnected, or simply at the fork point and new tip(s),
+	/// skipping any intermediary blocks.
+	pub fn update_best_block(&self, header: &BlockHeader, height: u32) {
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// during initialization prior to the chain_monitor being fully configured in some cases.
+		// See the docs for `ChannelManagerReadArgs` for more.
+
+		let block_hash = header.block_hash();
+		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
+
+		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+		self.latest_block_height.store(height as usize, Ordering::Release);
+		*self.last_block_hash.write().unwrap() = block_hash;
+
+		self.do_chain_event(height, |channel| channel.update_best_block(height, header.time));
 
 		loop {
 			// Update last_node_announcement_serial to be the max of its current value and the
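Together with `transactions_confirmed` above, this replaces the old monolithic `block_connected`. A sketch of the per-block call pattern, assuming `manager`, `block`, and `height` are in scope; it mirrors the `Listen` implementation earlier in this diff:

```rust
// Per the docs above, the two calls may come in either order for a given
// block, and transactions_confirmed may even be split into multiple calls,
// as long as reorged-out transaction data is never provided.
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
manager.transactions_confirmed(&block.header, height, &txdata);
manager.update_best_block(&block.header, height);
```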
@@ -3366,58 +3481,20 @@ impl ChannelMana
 		}
 	}
 
-	/// Updates channel state based on a disconnected block.
-	///
-	/// If necessary, the channel may be force-closed without letting the counterparty participate
-	/// in the shutdown.
-	pub fn block_disconnected(&self, header: &BlockHeader) {
-		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
-		// during initialization prior to the chain_monitor being fully configured in some cases.
-		// See the docs for `ChannelManagerReadArgs` for more.
-		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
-		self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
-		*self.last_block_hash.write().unwrap() = header.block_hash();
-
-		let mut failed_channels = Vec::new();
-		{
-			let mut channel_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_lock;
-			let short_to_id = &mut channel_state.short_to_id;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
-			channel_state.by_id.retain(|_, v| {
-				if v.block_disconnected(header) {
-					if let Some(short_id) = v.get_short_channel_id() {
-						short_to_id.remove(&short_id);
-					}
-					failed_channels.push(v.force_shutdown(true));
-					if let Ok(update) = self.get_channel_update(&v) {
-						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-							msg: update
-						});
-					}
-					false
-				} else {
-					true
-				}
-			});
-		}
-
-		self.handle_init_event_channel_failures(failed_channels);
-	}
-
 	/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
-	/// indicating whether persistence is necessary. Only one listener on `wait_timeout` is
-	/// guaranteed to be woken up.
+	/// indicating whether persistence is necessary. Only one listener on
+	/// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+	/// up.
 	/// Note that the feature `allow_wallclock_use` must be enabled to use this function.
 	#[cfg(any(test, feature = "allow_wallclock_use"))]
-	pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+	pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
 		self.persistence_notifier.wait_timeout(max_wait)
 	}
 
-	/// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
-	/// guaranteed to be woken up.
-	pub fn wait(&self) {
+	/// Blocks until ChannelManager needs to be persisted. Only one listener on
+	/// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+	/// up.
+	pub fn await_persistable_update(&self) {
 		self.persistence_notifier.wait()
 	}
 
@@ -3513,6 +3590,11 @@ impl
 					true,
 				&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
 				&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+				&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
 			}
 		});
 	}
@@ -3669,7 +3752,7 @@ impl
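A sketch of a persister loop built on the renamed wait methods; `manager` is assumed to be an `Arc<ChannelManager>` clone moved into the thread, and `persist_manager` is a hypothetical helper that serializes it to disk (the `_timeout` variant additionally requires the `allow_wallclock_use` feature, per the docs above):

```rust
use std::time::Duration;

std::thread::spawn(move || loop {
    // Returns true when new updates are ready to be persisted.
    if manager.await_persistable_update_timeout(Duration::from_secs(30)) {
        persist_manager(&*manager);
    }
});
```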