X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=afabcd7cc6bb30a294dcf8326d721ac718952e2d;hb=8ba352952242421041cef4019d0bf2172e712986;hp=e3bd06fd2bb72d0137bdfa8482ae34453801b499;hpb=100197c483d00883a01c09fd4e9efa613d470622;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index e3bd06fd..afabcd7c 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -28,14 +28,15 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::router::Route;
 use ln::msgs;
+use ln::msgs::LocalFeatures;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
 use chain::keysinterface::KeysInterface;
 use util::config::UserConfig;
-use util::{byte_utils, events, rng};
+use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use util::chacha20::ChaCha20;
 use util::logger::Logger;
@@ -213,11 +214,11 @@ impl MsgHandleErrInternal {
 	}
 }
 
-/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
-/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
-/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
-/// probably increase this significantly.
-const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
+/// We hold back HTLCs we intend to relay for a random interval greater than this (see
+/// Event::PendingHTLCsForwardable for the API guidelines on how long to wait).
+/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
+/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
+const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
 
 pub(super) enum HTLCForwardInfo {
 	AddHTLC {
@@ -340,6 +341,12 @@ pub struct ChannelManager {
 	logger: Arc<Logger>,
 }
 
+/// The amount of time we require our counterparty to wait before claiming their money (ie the
+/// window during which we, or our watchtower, must check for a broadcast theft transaction).
+pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The maximum amount of time we're willing to wait to claim money back to us
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
@@ -348,20 +355,21 @@ pub struct ChannelManager {
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY_DELTA is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY +
+// 2*LATENCY_GRACE_PERIOD_BLOCKS, ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out
+// on-chain, plus ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC before failing the
+// corresponding HTLC backwards, plus a final LATENCY_GRACE_PERIOD_BLOCKS of grace in which
+// to actually fail it backwards.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 macro_rules! secp_call {
 	( $res: expr, $err: expr ) => {
@@ -841,7 +849,7 @@ impl ChannelManager {
 		let pending_forward_info = if next_hop_data.hmac == [0; 32] {
 				// OUR PAYMENT!
				// final_expiry_too_soon
-				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
 					return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 				}
 				// final_incorrect_htlc_amount
@@ -933,8 +941,8 @@ impl ChannelManager {
 					break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 				}
 				let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-				// We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+				// We want at least LATENCY_GRACE_PERIOD_BLOCKS to fail the HTLC backwards before going on chain, which we'll do CLTV_CLAIM_BUFFER blocks before expiration
+				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
 					break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 				}
 				if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
@@ -1123,7 +1131,7 @@ impl ChannelManager {
 	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
 		let _ = self.total_consistency_lock.read().unwrap();
 
-		let (chan, msg, chan_monitor) = {
+		let (mut chan, msg, chan_monitor) = {
 			let (res, chan) = {
 				let mut channel_state = self.channel_state.lock().unwrap();
 				match channel_state.by_id.remove(temporary_channel_id) {
@@ -1154,8 +1162,30 @@ impl ChannelManager {
 		};
 		// Because we have exclusive ownership of the channel here we can release the channel_state
 		// lock before add_update_monitor
-		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!();
+		if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+			match e {
+				ChannelMonitorUpdateErr::PermanentFailure => {
+					match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
+						Err(e) => {
+							log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
+							let mut channel_state = self.channel_state.lock().unwrap();
+							channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+								node_id: chan.get_their_node_id(),
+								action: e.action,
+							});
+							return;
+						},
+						Ok(()) => unreachable!(),
+					}
+				},
+				ChannelMonitorUpdateErr::TemporaryFailure => {
+					// It's completely fine to continue with a FundingCreated until the monitor
+					// update is persisted, as long as we don't generate the FundingBroadcastSafe
+					// until the monitor has been safely persisted (as funding broadcast is not,
+					// in fact, safe).
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new()); + }, + } } let mut channel_state = self.channel_state.lock().unwrap(); @@ -1484,7 +1514,7 @@ impl ChannelManager { let mut forward_event = None; if channel_state_lock.forward_htlcs.is_empty() { - forward_event = Some(Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64)); + forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); } match channel_state_lock.forward_htlcs.entry(short_channel_id) { hash_map::Entry::Occupied(mut entry) => { @@ -1619,6 +1649,7 @@ impl ChannelManager { let mut close_results = Vec::new(); let mut htlc_forwards = Vec::new(); let mut htlc_failures = Vec::new(); + let mut pending_events = Vec::new(); let _ = self.total_consistency_lock.read().unwrap(); { @@ -1653,7 +1684,7 @@ impl ChannelManager { ChannelMonitorUpdateErr::TemporaryFailure => true, } } else { - let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored(); + let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(); if !pending_forwards.is_empty() { htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards)); } @@ -1685,12 +1716,33 @@ impl ChannelManager { handle_cs!(); }, } + if needs_broadcast_safe { + pending_events.push(events::Event::FundingBroadcastSafe { + funding_txo: channel.get_funding_txo().unwrap(), + user_channel_id: channel.get_user_id(), + }); + } + if let Some(msg) = funding_locked { + pending_msg_events.push(events::MessageSendEvent::SendFundingLocked { + node_id: channel.get_their_node_id(), + msg, + }); + if let Some(announcement_sigs) = self.get_announcement_sigs(channel) { + pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: channel.get_their_node_id(), + msg: announcement_sigs, + }); + } + short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id()); + } true } } else { true } }); } + self.pending_events.lock().unwrap().append(&mut pending_events); + for failure in htlc_failures.drain(..) 
{ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); } @@ -1701,12 +1753,12 @@ impl ChannelManager { } } - fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { + fn internal_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { if msg.chain_hash != self.genesis_hash { return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone())); } - let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration) + let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_local_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration) .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); @@ -1723,7 +1775,7 @@ impl ChannelManager { Ok(()) } - fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { + fn internal_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { let (value, output_script, user_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = channel_lock.borrow_parts(); @@ -1733,7 +1785,7 @@ impl ChannelManager { //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, //TODO: same as above @@ -1751,7 +1803,7 @@ impl ChannelManager { } fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let ((funding_msg, monitor_update), chan) = { + let ((funding_msg, monitor_update), mut chan) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = channel_lock.borrow_parts(); match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { @@ -1767,8 +1819,23 @@ impl ChannelManager { }; // Because we have exclusive ownership of the channel here we can release the channel_state // lock before add_update_monitor - if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) { - unimplemented!(); + if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) { + match e { + ChannelMonitorUpdateErr::PermanentFailure => { + // Note that we reply with the new channel_id in error messages if we gave up on the + // channel, not the temporary_channel_id. This is compatible with ourselves, but the + // spec is somewhat ambiguous here. 
Not a huge deal since we'll send error messages for
+					// any messages referencing a previously-closed channel anyway.
+					return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+				},
+				ChannelMonitorUpdateErr::TemporaryFailure => {
+					// There's no problem signing a counterparty's funding transaction if our monitor
+					// hasn't been persisted to disk yet - we can't lose money on a transaction that we
+					// haven't accepted payment from yet. We do, however, need to wait to send our
+					// funding_locked until we have persisted our monitor.
+					chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+				},
+			}
 		}
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
@@ -1798,8 +1865,8 @@ impl ChannelManager {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
 				let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
-				if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-					unimplemented!();
+				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
 				}
 				(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
 			},
@@ -2093,7 +2160,7 @@ impl ChannelManager {
 		if !pending_forwards.is_empty() {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			if channel_state.forward_htlcs.is_empty() {
-				forward_event = Some(Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
+				forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
			}
 			for (forward_info, prev_htlc_id) in pending_forwards.drain(..)
{ match channel_state.forward_htlcs.entry(forward_info.short_channel_id) { @@ -2489,7 +2556,7 @@ impl ChainListener for ChannelManager { } /// We force-close the channel without letting our counterparty participate in the shutdown - fn block_disconnected(&self, header: &BlockHeader) { + fn block_disconnected(&self, header: &BlockHeader, _: u32) { let _ = self.total_consistency_lock.read().unwrap(); let mut failed_channels = Vec::new(); { @@ -2524,14 +2591,14 @@ impl ChainListener for ChannelManager { impl ChannelMessageHandler for ChannelManager { //TODO: Handle errors and close channel (or so) - fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> { + fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_open_channel(their_node_id, msg)) + handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg)) } - fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> { + fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> { let _ = self.total_consistency_lock.read().unwrap(); - handle_error!(self, self.internal_accept_channel(their_node_id, msg)) + handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg)) } fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
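A note on the MIN_HTLC_RELAY_HOLDING_CELL_MILLIS change above: the old rng-scaled delay is replaced by a fixed minimum because a forward-timer event is only generated when forward_htlcs transitions from empty to non-empty, so the constant acts as a per-batch delay rather than a per-HTLC one. A minimal, self-contained sketch of that pattern (hypothetical queue_forward helper, not the real ChannelManager internals):

use std::time::Duration;

const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;

// Returns Some(delay) only when this HTLC starts a new batch, mirroring how a
// PendingHTLCsForwardable-style event is only generated when the forward queue
// was previously empty.
fn queue_forward(forward_htlcs: &mut Vec<u64>, htlc_id: u64) -> Option<Duration> {
    let forward_event = if forward_htlcs.is_empty() {
        Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
    } else {
        None // a batch timer is already pending; this HTLC rides along with it
    };
    forward_htlcs.push(htlc_id);
    forward_event
}

fn main() {
    let mut queue = Vec::new();
    assert!(queue_forward(&mut queue, 1).is_some()); // first HTLC arms the timer
    assert!(queue_forward(&mut queue, 2).is_none()); // later HTLCs batch with it
}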
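The CHECK_CLTV_EXPIRY_SANITY constants above use unsigned underflow in constant evaluation as a compile-time assertion: if the invariant is violated, the const fails to evaluate and the build is rejected. A reduced sketch of the trick, with illustrative values standing in for the real ones:

// With const_err denied, a subtraction that would underflow makes rustc
// reject the crate; the (dead) constant is never used at runtime.
const CLTV_EXPIRY_DELTA: u32 = 6 * 12;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_CLAIM_BUFFER: u32 = 6;
const ANTI_REORG_DELAY: u32 = 6;

#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 =
    CLTV_EXPIRY_DELTA - 2 * LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY;

fn main() {
    // Nothing to do at runtime; the invariant was enforced during compilation.
}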
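The final_expiry_too_soon and expiry_too_soon/expiry_too_far checks reduce to block-height arithmetic. A self-contained sketch of that logic (hypothetical check_forward_expiry helper and illustrative constants, not rust-lightning API):

const CLTV_CLAIM_BUFFER: u32 = 6;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7;

#[derive(Debug, PartialEq)]
enum ExpiryCheck { Ok, TooSoon, TooFar }

// We must keep LATENCY_GRACE_PERIOD_BLOCKS to fail an HTLC backwards off-chain
// before the CLTV_CLAIM_BUFFER on-chain deadline, and we refuse HTLCs whose
// expiry is absurdly far in the future.
fn check_forward_expiry(cltv_expiry: u32, best_block_height: u32) -> ExpiryCheck {
    let cur_height = best_block_height + 1; // the next block that could confirm
    if cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS {
        ExpiryCheck::TooSoon
    } else if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY {
        ExpiryCheck::TooFar
    } else {
        ExpiryCheck::Ok
    }
}

fn main() {
    assert_eq!(check_forward_expiry(110, 100), ExpiryCheck::TooSoon);
    assert_eq!(check_forward_expiry(150, 100), ExpiryCheck::Ok);
}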
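Finally, the add_update_monitor error handling introduced across the hunks above follows one pattern: ChannelMonitorUpdateErr::PermanentFailure force-closes the channel, while TemporaryFailure freezes it until the monitor write is persisted (the real Channel::monitor_update_failed takes several arguments, as seen above). A stripped-down sketch with stand-in types:

enum ChannelMonitorUpdateErr { PermanentFailure, TemporaryFailure }

struct Channel { open: bool, frozen: bool }

impl Channel {
    fn force_shutdown(&mut self) { self.open = false; }
    // Stand-in for Channel::monitor_update_failed: pause channel progress until
    // the persisted monitor state catches up with the in-memory state.
    fn monitor_update_failed(&mut self) { self.frozen = true; }
}

fn on_monitor_update(chan: &mut Channel, res: Result<(), ChannelMonitorUpdateErr>) {
    match res {
        Ok(()) => {},
        Err(ChannelMonitorUpdateErr::PermanentFailure) => {
            // We can never safely update this channel again: go on-chain.
            chan.force_shutdown();
        },
        Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
            // The write will eventually succeed: make no further progress (and
            // send no funding_locked or FundingBroadcastSafe) until it does.
            chan.monitor_update_failed();
        },
    }
}

fn main() {
    let mut chan = Channel { open: true, frozen: false };
    on_monitor_update(&mut chan, Err(ChannelMonitorUpdateErr::TemporaryFailure));
    assert!(chan.open && chan.frozen);
}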