X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=10c5ce0a11cb2a8985f5e1e5d4f39a15e7b2e706;hb=c05e087c430e05934a5312342f886f9d19889b20;hp=23d5eb21f75269fed9270c8c33913db9453b0d3c;hpb=2950ffe54e480610dcaf0188a8f556165bbe7efe;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 23d5eb21..10c5ce0a 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -12,11 +12,12 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
-use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
+use bitcoin::util::hash::BitcoinHash;
 
 use bitcoin_hashes::{Hash, HashEngine};
 use bitcoin_hashes::hmac::{Hmac, HmacEngine};
 use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 use bitcoin_hashes::cmp::fixed_time_eq;
 
 use secp256k1::key::{SecretKey,PublicKey};
@@ -27,14 +28,15 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::router::Route;
 use ln::msgs;
+use ln::msgs::LocalFeatures;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
 use chain::keysinterface::KeysInterface;
 use util::config::UserConfig;
-use util::{byte_utils, events, rng};
+use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use util::chacha20::ChaCha20;
 use util::logger::Logger;
@@ -45,7 +47,7 @@ use std::collections::{HashMap, hash_map, HashSet};
 use std::io::Cursor;
 use std::sync::{Arc, Mutex, MutexGuard, RwLock};
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::{Instant,Duration};
+use std::time::Duration;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -212,11 +214,11 @@ impl MsgHandleErrInternal {
 	}
 }
 
-/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
-/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
-/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
-/// probably increase this significantly.
-const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
+/// We hold back HTLCs we intend to relay for a random interval greater than this (see
+/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
+/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
+/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
+const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
 
 pub(super) enum HTLCForwardInfo {
 	AddHTLC {
@@ -246,7 +248,6 @@ pub(super) enum RAACommitmentOrder {
 pub(super) struct ChannelHolder {
 	pub(super) by_id: HashMap<[u8; 32], Channel>,
 	pub(super) short_to_id: HashMap<u64, [u8; 32]>,
-	pub(super) next_forward: Instant,
 	/// short channel id -> forward infos. Key of 0 means payments received
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the existence of a channel with the short id here, nor the short
@@ -265,7 +266,6 @@ pub(super) struct ChannelHolder {
 pub(super) struct MutChannelHolder<'a> {
 	pub(super) by_id: &'a mut HashMap<[u8; 32], Channel>,
 	pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
-	pub(super) next_forward: &'a mut Instant,
 	pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
 	pub(super) claimable_htlcs: &'a mut HashMap>,
 	pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
@@ -275,7 +275,6 @@ impl ChannelHolder {
 		MutChannelHolder {
 			by_id: &mut self.by_id,
 			short_to_id: &mut self.short_to_id,
-			next_forward: &mut self.next_forward,
 			forward_htlcs: &mut self.forward_htlcs,
 			claimable_htlcs: &mut self.claimable_htlcs,
 			pending_msg_events: &mut self.pending_msg_events,
 		}
@@ -342,6 +341,12 @@ pub struct ChannelManager {
 	logger: Arc<Logger>,
 }
 
+/// The amount of time we require our counterparty wait to claim their money (ie time between when
+/// we, or our watchtower, must check for them having broadcast a theft transaction).
+pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time we're willing to wait to claim money back to us
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
@@ -350,20 +355,21 @@ pub struct ChannelManager {
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
+// ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
+// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
+// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
+// LATENCY_GRACE_PERIOD_BLOCKS.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMontior::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 macro_rules! secp_call {
 	( $res: expr, $err: expr ) => {
@@ -390,6 +396,20 @@ pub struct ChannelDetails {
 	pub channel_value_satoshis: u64,
 	/// The user_id passed in to create_channel, or 0 if the channel was inbound.
 	pub user_id: u64,
+	/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
+	/// any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not
+	/// available for inclusion in new outbound HTLCs). This further does not include any pending
+	/// outgoing HTLCs which are awaiting some other resolution to be sent.
+	pub outbound_capacity_msat: u64,
+	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
+	/// include any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not
+	/// available for inclusion in new inbound HTLCs).
+	/// Note that there are some corner cases not fully handled here, so the actual available
+	/// inbound capacity may be slightly higher than this.
+	pub inbound_capacity_msat: u64,
+	/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+	/// the peer is connected, and (c) no monitor update failure is pending resolution.
+	pub is_live: bool,
 }
 
 macro_rules! handle_error {
@@ -493,7 +513,7 @@ macro_rules! handle_monitor_err {
 				if !$resend_raa {
 					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
 				}
-				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+				$entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
 				Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
 			},
 		}
@@ -548,7 +568,6 @@ impl ChannelManager {
 			channel_state: Mutex::new(ChannelHolder{
 				by_id: HashMap::new(),
 				short_to_id: HashMap::new(),
-				next_forward: Instant::now(),
 				forward_htlcs: HashMap::new(),
 				claimable_htlcs: HashMap::new(),
 				pending_msg_events: Vec::new(),
@@ -612,12 +631,16 @@ impl ChannelManager {
 		let channel_state = self.channel_state.lock().unwrap();
 		let mut res = Vec::with_capacity(channel_state.by_id.len());
 		for (channel_id, channel) in channel_state.by_id.iter() {
+			let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
 			res.push(ChannelDetails {
 				channel_id: (*channel_id).clone(),
 				short_channel_id: channel.get_short_channel_id(),
 				remote_network_id: channel.get_their_node_id(),
 				channel_value_satoshis: channel.get_value_satoshis(),
+				inbound_capacity_msat,
+				outbound_capacity_msat,
 				user_id: channel.get_user_id(),
+				is_live: channel.is_live(),
 			});
 		}
 		res
@@ -625,6 +648,9 @@ impl ChannelManager {
 
 	/// Gets the list of usable channels, in random order. Useful as an argument to
 	/// Router::get_route to ensure non-announced channels are used.
+	///
+	/// These are guaranteed to have their is_live value set to true, see the documentation for
+	/// ChannelDetails::is_live for more info on exactly what the criteria are.
 	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
 		let channel_state = self.channel_state.lock().unwrap();
 		let mut res = Vec::with_capacity(channel_state.by_id.len());
@@ -633,12 +659,16 @@ impl ChannelManager {
 			// internal/external nomenclature, but that's ok cause that's probably what the user
 			// really wanted anyway.
 			if channel.is_live() {
+				let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
 				res.push(ChannelDetails {
 					channel_id: (*channel_id).clone(),
 					short_channel_id: channel.get_short_channel_id(),
 					remote_network_id: channel.get_their_node_id(),
 					channel_value_satoshis: channel.get_value_satoshis(),
+					inbound_capacity_msat,
+					outbound_capacity_msat,
 					user_id: channel.get_user_id(),
+					is_live: true,
 				});
 			}
 		}
@@ -819,7 +849,7 @@ impl ChannelManager {
 		let pending_forward_info = if next_hop_data.hmac == [0; 32] {
 				// OUR PAYMENT!
 				// final_expiry_too_soon
-				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
 					return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 				}
 				// final_incorrect_htlc_amount
@@ -911,8 +941,8 @@ impl ChannelManager {
 					break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 				}
 				let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-				// We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+				// We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
+				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
 					break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 				}
 				if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
@@ -964,7 +994,7 @@ impl ChannelManager {
 			excess_data: Vec::new(),
 		};
 
-		let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
+		let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
 		let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 
 		Ok(msgs::ChannelUpdate {
@@ -1158,7 +1188,7 @@ impl ChannelManager {
 			Ok(res) => res,
 			Err(_) => return None, // Only in case of state precondition violations eg channel is closing
 		};
-		let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
+		let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
 		let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 
 		Some(msgs::AnnouncementSignatures {
@@ -1183,10 +1213,6 @@ impl ChannelManager {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();
 
-			if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
-				return;
-			}
-
 			for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
 				if short_chan_id != 0 {
 					let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
@@ -1466,8 +1492,7 @@ impl ChannelManager {
 
 		let mut forward_event = None;
 		if channel_state_lock.forward_htlcs.is_empty() {
-			forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
-			channel_state_lock.next_forward = forward_event.unwrap();
+			forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
 		}
 		match channel_state_lock.forward_htlcs.entry(short_channel_id) {
			hash_map::Entry::Occupied(mut entry) => {
@@ -1684,12 +1709,12 @@ impl ChannelManager {
 		}
 	}
 
-	fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+	fn internal_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
 		if msg.chain_hash != self.genesis_hash {
 			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
 		}
 
-		let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration)
+		let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_local_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
 			.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
@@ -1706,7 +1731,7 @@ impl ChannelManager {
 		Ok(())
 	}
 
-	fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+	fn internal_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
 		let (value, output_script, user_id) = {
 			let mut channel_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_lock.borrow_parts();
@@ -1716,7 +1741,7 @@ impl ChannelManager {
 					//TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
 				}
-				try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan);
+				try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan);
 				(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
 			},
 			//TODO: same as above
@@ -2076,8 +2101,7 @@ impl ChannelManager {
 		if !pending_forwards.is_empty() {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			if channel_state.forward_htlcs.is_empty() {
-				forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
-				channel_state.next_forward = forward_event.unwrap();
+				forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
 			}
 			for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
 				match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
@@ -2182,7 +2206,7 @@ impl ChannelManager {
 					try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
 				let were_node_one = announcement.node_id_1 == our_node_id;
-				let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
+				let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
 				if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
 						self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
 					try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
 				}
@@ -2473,7 +2497,7 @@ impl ChainListener for ChannelManager {
 	}
 
 	/// We force-close the channel without letting our counterparty participate in the shutdown
-	fn block_disconnected(&self, header: &BlockHeader) {
+	fn block_disconnected(&self, header: &BlockHeader, _: u32) {
 		let _ = self.total_consistency_lock.read().unwrap();
 		let mut failed_channels = Vec::new();
 		{
@@ -2508,14 +2532,14 @@ impl ChannelMessageHandler for ChannelManager {
 	//TODO: Handle errors and close channel (or so)
-	fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
+	fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
 		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_open_channel(their_node_id, msg))
+		handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg))
 	}
 
-	fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+	fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
 		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_accept_channel(their_node_id, msg))
+		handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg))
 	}
 
 	fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
@@ -3086,7 +3110,6 @@ impl<'a, R : ::std::io::Read> ReadableArgs> for (S
 			channel_state: Mutex::new(ChannelHolder {
 				by_id,
 				short_to_id,
-				next_forward: Instant::now(),
 				forward_htlcs,
 				claimable_htlcs,
 				pending_msg_events: Vec::new(),
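A note on consuming this change: with the internal next_forward/Instant gate removed from ChannelHolder, waiting before forwarding becomes the event consumer's job. The sketch below shows one way a caller might drive it; it is an illustrative sketch only, assuming the Event::PendingHTLCsForwardable { time_forwardable } shape referenced by the doc comment on MIN_HTLC_RELAY_HOLDING_CELL_MILLIS and the lightning::util::events::Event / lightning::ln::channelmanager::ChannelManager paths of this era, and it uses a blocking sleep where a real integration would more likely schedule a timer.

// Sketch: wait at least the requested interval before calling
// process_pending_htlc_forwards(), since ChannelManager no longer enforces the
// delay internally. Field name `time_forwardable` and module paths are assumed.
use std::sync::Arc;
use std::thread;
use std::time::Duration;

use lightning::ln::channelmanager::ChannelManager;
use lightning::util::events::Event;

fn handle_events(channel_manager: &Arc<ChannelManager>, events: Vec<Event>) {
	for event in events {
		match event {
			Event::PendingHTLCsForwardable { time_forwardable } => {
				// Honor the requested Duration (never less than the 100ms
				// MIN_HTLC_RELAY_HOLDING_CELL_MILLIS floor); adding random
				// jitter on top preserves the small batching/privacy benefit
				// the old Instant-based check provided inside ChannelManager.
				let delay = time_forwardable.max(Duration::from_millis(100));
				thread::sleep(delay);
				channel_manager.process_pending_htlc_forwards();
			},
			_ => {
				// Other events (funding, payments, errors) are handled elsewhere.
			},
		}
	}
}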