X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=c147ed1c4f137cd0519defe457327b3ce753a240;hb=0882655680040a0226b1790b6558150324a288f4;hp=cb75599cd90037d2b9c95aa03efe8563d498173f;hpb=f30694bd8c39a967848a84752e3b7d8d520cd0b1;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cb75599c..c147ed1c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -64,7 +64,6 @@ use util::errors::APIError; use prelude::*; use core::{cmp, mem}; use core::cell::RefCell; -use std::collections::{HashMap, hash_map, HashSet}; use std::io::{Cursor, Read}; use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; use core::sync::atomic::{AtomicUsize, Ordering}; @@ -498,6 +497,7 @@ pub struct ChannelManager, + /// The value, in satoshis, that must always be held in the channel for our counterparty. This + /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by + /// claiming at least this value on chain. + /// + /// This value is not included in [`inbound_capacity_msat`] as it can never be spent. + /// + /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat + pub to_remote_reserve_satoshis: u64, /// The user_id passed in to create_channel, or 0 if the channel was inbound. pub user_id: u64, /// The available outbound capacity for sending HTLCs to the remote peer. This does not include /// any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not /// available for inclusion in new outbound HTLCs). This further does not include any pending /// outgoing HTLCs which are awaiting some other resolution to be sent. + /// + /// This value is not exact. Due to various in-flight changes, feerate changes, and our + /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we + /// should be able to spend nearly this amount. pub outbound_capacity_msat: u64, /// The available inbound capacity for the remote peer to send HTLCs to us. This does not /// include any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not /// available for inclusion in new inbound HTLCs). /// Note that there are some corner cases not fully handled here, so the actual available /// inbound capacity may be slightly higher than this. + /// + /// This value is not exact. Due to various in-flight changes, feerate changes, and our + /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable. + /// However, our counterparty should be able to spend nearly this amount. pub inbound_capacity_msat: u64, + /// The number of required confirmations on the funding transaction before the funding will be + /// considered "locked". This number is selected by the channel fundee (i.e. us if + /// [`is_outbound`] is *not* set), and can be selected for inbound channels with + /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with + /// [`ChannelHandshakeLimits::max_minimum_depth`]. + /// + /// This value will be `None` for outbound channels until the counterparty accepts the channel. 
+	///
+	/// [`is_outbound`]: ChannelDetails::is_outbound
+	/// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
+	/// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
+	pub confirmations_required: Option<u32>,
+	/// The number of blocks (after our commitment transaction confirms) that we will need to wait
+	/// until we can claim our funds after we force-close the channel. During this time our
+	/// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
+	/// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
+	/// time to claim our non-HTLC-encumbered funds.
+	///
+	/// This value will be `None` for outbound channels until the counterparty accepts the channel.
+	pub spend_csv_on_our_commitment_funds: Option<u16>,
 	/// True if the channel was initiated (and thus funded) by us.
 	pub is_outbound: bool,
 	/// True if the channel is confirmed, funding_locked messages have been exchanged, and the
 	/// channel is not currently being shut down. `funding_locked` message exchange implies the
 	/// required confirmation count has been reached (and we were connected to the peer at some
-	/// point after the funding transaction received enough confirmations).
+	/// point after the funding transaction received enough confirmations). The required
+	/// confirmation count is provided in [`confirmations_required`].
+	///
+	/// [`confirmations_required`]: ChannelDetails::confirmations_required
 	pub is_funding_locked: bool,
 	/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
-	/// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
-	/// channel is not currently negotiating a shutdown.
+	/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
 	///
 	/// This is a strict superset of `is_funding_locked`.
 	pub is_usable: bool,
@@ -776,7 +824,7 @@ macro_rules!
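The new ChannelDetails fields above are easiest to see from the caller's side. Below is a minimal, self-contained sketch: the struct is a pared-down stand-in for ChannelDetails carrying only the fields discussed in this hunk (with the real crate these come from ChannelManager::list_channels()), and the helper applies the hedges in the docs, namely that outbound_capacity_msat is only approximately spendable and the reserve is never spendable at all.

// Pared-down stand-in for ChannelDetails, limited to the fields added/documented above.
struct Details {
	outbound_capacity_msat: u64,
	to_remote_reserve_satoshis: u64,        // reserve the counterparty must keep; excluded from inbound_capacity_msat
	confirmations_required: Option<u32>,    // chosen by the fundee; None until the counterparty accepts the channel
	spend_csv_on_our_commitment_funds: Option<u16>, // CSV delay on our funds if *we* force-close
	is_usable: bool,
}

// Pick a channel that can plausibly carry `amt_msat` right now. Because
// outbound_capacity_msat is approximate, leave a little headroom rather than
// spending it to the last msat.
fn first_usable(channels: &[Details], amt_msat: u64) -> Option<usize> {
	channels.iter().position(|c| c.is_usable && c.outbound_capacity_msat > amt_msat + amt_msat / 100)
}

fn main() {
	let channels = vec![Details {
		outbound_capacity_msat: 500_000_000,
		to_remote_reserve_satoshis: 10_000,
		confirmations_required: Some(6),
		spend_csv_on_our_commitment_funds: Some(144),
		is_usable: true,
	}];
	assert_eq!(first_usable(&channels, 100_000_000), Some(0));
}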
convert_chan_err { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, ChannelError::Close(msg) => { - log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); if let Some(short_id) = $channel.get_short_channel_id() { $short_to_id.remove(&short_id); } @@ -1147,6 +1195,8 @@ impl ChannelMana res.reserve(channel_state.by_id.len()); for (channel_id, channel) in channel_state.by_id.iter().filter(f) { let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat(); + let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = + channel.get_holder_counterparty_selected_channel_reserve_satoshis(); res.push(ChannelDetails { channel_id: (*channel_id).clone(), funding_txo: channel.get_funding_txo(), @@ -1154,9 +1204,13 @@ impl ChannelMana remote_network_id: channel.get_counterparty_node_id(), counterparty_features: InitFeatures::empty(), channel_value_satoshis: channel.get_value_satoshis(), + to_self_reserve_satoshis, + to_remote_reserve_satoshis, inbound_capacity_msat, outbound_capacity_msat, user_id: channel.get_user_id(), + confirmations_required: channel.minimum_depth(), + spend_csv_on_our_commitment_funds: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), is_funding_locked: channel.is_usable(), is_usable: channel.is_live(), @@ -1243,7 +1297,7 @@ impl ChannelMana #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; - log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len()); + log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } @@ -1274,7 +1328,7 @@ impl ChannelMana return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); } }; - log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(true)); if let Ok(update) = self.get_channel_update(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); @@ -1671,6 +1725,7 @@ impl ChannelMana return Err(APIError::MonitorUpdateFailed); } + log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: path.first().unwrap().pubkey, updates: msgs::CommitmentUpdate { @@ -2060,7 +2115,7 @@ impl ChannelMana onion_packet, .. 
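The failure_code 0x4000 | 8 used when failing back HTLCs during force-closure above is a BOLT 4 code: the PERM flag (0x4000) combined with reason 8, i.e. permanent_channel_failure. A tiny check of the flag arithmetic:

// BOLT 4 onion failure codes are a u16 of flag bits plus a reason number.
const PERM: u16 = 0x4000;

fn main() {
	let code = PERM | 8; // permanent_channel_failure, as used in finish_force_close_channel above
	assert_eq!(code, 0x4008);
	assert!(code & PERM != 0); // receivers treat the channel as unusable, not just temporarily failed
}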
}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, prev_funding_outpoint } => { - log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id); + log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, outpoint: prev_funding_outpoint, @@ -2100,11 +2155,11 @@ impl ChannelMana panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); }, HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id); + log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { Err(e) => { if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg); + log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); } else { panic!("Stated return value requirements in get_update_fail_htlc() were not met"); } @@ -2158,6 +2213,8 @@ impl ChannelMana handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; } + log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", + add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), updates: msgs::CommitmentUpdate { @@ -2321,7 +2378,8 @@ impl ChannelMana } #[cfg(any(test, feature = "_test_utils"))] - pub(crate) fn test_process_background_events(&self) { + /// Process background events, for functional testing + pub fn test_process_background_events(&self) { self.process_background_events(); } @@ -2663,6 +2721,8 @@ impl ChannelMana } } if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), updates: msgs::CommitmentUpdate { @@ -2926,7 +2986,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan); + try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan); if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) { log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id())); // If we see locking block before receiving remote funding_locked, we broadcast our @@ -3340,31 +3400,37 @@ impl 
ChannelMana Ok(()) } - fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> { + /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err. + fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) { Some(chan_id) => chan_id.clone(), None => { // It's not a local channel - return Ok(()) + return Ok(NotifyOption::SkipPersist) } }; match channel_state.by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_counterparty_node_id() != *counterparty_node_id { - // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id)); + if chan.get().should_announce() { + // If the announcement is about a channel of ours which is public, some + // other peer may simply be forwarding all its gossip to us. Don't provide + // a scary-looking error message and return Ok instead. + return Ok(NotifyOption::SkipPersist); + } + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); }, hash_map::Entry::Vacant(_) => unreachable!() } - Ok(()) + Ok(NotifyOption::DoPersist) } fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let (htlcs_failed_forward, chan_restoration_res) = { + let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3385,13 +3451,19 @@ impl ChannelMana msg, }); } - (htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked)) + let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); + (htlcs_failed_forward, need_lnd_workaround, + handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked)) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } }; post_handle_chan_restoration!(self, chan_restoration_res); self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id); + + if let Some(funding_locked_msg) = need_lnd_workaround { + self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?; + } Ok(()) } @@ -3427,6 +3499,7 @@ impl ChannelMana if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { unimplemented!(); } + log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), updates: msgs::CommitmentUpdate { @@ -3803,7 +3876,7 @@ 
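internal_channel_update now reports whether anything actually changed, so redundant gossip no longer forces a full ChannelManager persistence. A self-contained illustration of the SkipPersist/DoPersist idea follows; the Notifier type is a stand-in for illustration only, not the crate's PersistenceNotifier.

use std::sync::{Condvar, Mutex};

enum NotifyOption { DoPersist, SkipPersist }

// Stand-in notifier: the handler closure reports whether state changed, and only
// DoPersist marks the manager as needing persistence and wakes any waiter.
struct Notifier { persistence_pending: Mutex<bool>, cvar: Condvar }

impl Notifier {
	fn optionally_notify<F: FnOnce() -> NotifyOption>(&self, handler: F) {
		if let NotifyOption::DoPersist = handler() {
			*self.persistence_pending.lock().unwrap() = true;
			self.cvar.notify_all();
		}
	}
}

fn main() {
	let notifier = Notifier { persistence_pending: Mutex::new(false), cvar: Condvar::new() };
	// Mirrors handle_channel_update below: gossip about unrelated channels is ignored.
	notifier.optionally_notify(|| NotifyOption::SkipPersist);
	assert!(!*notifier.persistence_pending.lock().unwrap());
}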
where *best_block = BestBlock::new(header.prev_blockhash, new_height) } - self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time)); + self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger)); } } @@ -3839,7 +3912,7 @@ where *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time)); + self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger)); macro_rules! max_time { ($timestamp: expr) => { @@ -3881,7 +3954,7 @@ where self.do_chain_event(None, |channel| { if let Some(funding_txo) = channel.get_funding_txo() { if funding_txo.txid == *txid { - channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new())) + channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new())) } else { Ok((None, Vec::new())) } } else { Ok((None, Vec::new())) } }); @@ -4098,8 +4171,13 @@ impl } fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id); + PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { + if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) { + persist + } else { + NotifyOption::SkipPersist + } + }); } fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) { @@ -4317,243 +4395,86 @@ impl PersistenceNotifier { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; -impl Writeable for PendingHTLCInfo { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - match &self.routing { - &PendingHTLCRouting::Forward { ref onion_packet, ref short_channel_id } => { - 0u8.write(writer)?; - onion_packet.write(writer)?; - short_channel_id.write(writer)?; - }, - &PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => { - 1u8.write(writer)?; - payment_data.payment_secret.write(writer)?; - payment_data.total_msat.write(writer)?; - incoming_cltv_expiry.write(writer)?; - }, - } - self.incoming_shared_secret.write(writer)?; - self.payment_hash.write(writer)?; - self.amt_to_forward.write(writer)?; - self.outgoing_cltv_value.write(writer)?; - Ok(()) - } -} - -impl Readable for PendingHTLCInfo { - fn read(reader: &mut R) -> Result { - Ok(PendingHTLCInfo { - routing: match Readable::read(reader)? 
{ - 0u8 => PendingHTLCRouting::Forward { - onion_packet: Readable::read(reader)?, - short_channel_id: Readable::read(reader)?, - }, - 1u8 => PendingHTLCRouting::Receive { - payment_data: msgs::FinalOnionHopData { - payment_secret: Readable::read(reader)?, - total_msat: Readable::read(reader)?, - }, - incoming_cltv_expiry: Readable::read(reader)?, - }, - _ => return Err(DecodeError::InvalidValue), - }, - incoming_shared_secret: Readable::read(reader)?, - payment_hash: Readable::read(reader)?, - amt_to_forward: Readable::read(reader)?, - outgoing_cltv_value: Readable::read(reader)?, - }) - } -} - -impl Writeable for HTLCFailureMsg { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - match self { - &HTLCFailureMsg::Relay(ref fail_msg) => { - 0u8.write(writer)?; - fail_msg.write(writer)?; - }, - &HTLCFailureMsg::Malformed(ref fail_msg) => { - 1u8.write(writer)?; - fail_msg.write(writer)?; - } - } - Ok(()) - } -} - -impl Readable for HTLCFailureMsg { - fn read(reader: &mut R) -> Result { - match ::read(reader)? { - 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)), - 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)), - _ => Err(DecodeError::InvalidValue), - } - } -} - -impl Writeable for PendingHTLCStatus { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - match self { - &PendingHTLCStatus::Forward(ref forward_info) => { - 0u8.write(writer)?; - forward_info.write(writer)?; - }, - &PendingHTLCStatus::Fail(ref fail_msg) => { - 1u8.write(writer)?; - fail_msg.write(writer)?; - } - } - Ok(()) - } -} - -impl Readable for PendingHTLCStatus { - fn read(reader: &mut R) -> Result { - match ::read(reader)? { - 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)), - 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)), - _ => Err(DecodeError::InvalidValue), - } - } -} - -impl_writeable!(HTLCPreviousHopData, 0, { - short_channel_id, - outpoint, - htlc_id, - incoming_packet_shared_secret +impl_writeable_tlv_based_enum!(PendingHTLCRouting, + (0, Forward) => { + (0, onion_packet, required), + (2, short_channel_id, required), + }, + (1, Receive) => { + (0, payment_data, required), + (2, incoming_cltv_expiry, required), + } +;); + +impl_writeable_tlv_based!(PendingHTLCInfo, { + (0, routing, required), + (2, incoming_shared_secret, required), + (4, payment_hash, required), + (6, amt_to_forward, required), + (8, outgoing_cltv_value, required) }); -impl Writeable for ClaimableHTLC { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - self.prev_hop.write(writer)?; - self.value.write(writer)?; - self.payment_data.payment_secret.write(writer)?; - self.payment_data.total_msat.write(writer)?; - self.cltv_expiry.write(writer) - } -} - -impl Readable for ClaimableHTLC { - fn read(reader: &mut R) -> Result { - Ok(ClaimableHTLC { - prev_hop: Readable::read(reader)?, - value: Readable::read(reader)?, - payment_data: msgs::FinalOnionHopData { - payment_secret: Readable::read(reader)?, - total_msat: Readable::read(reader)?, - }, - cltv_expiry: Readable::read(reader)?, - }) - } -} - -impl Writeable for HTLCSource { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - match self { - &HTLCSource::PreviousHopData(ref hop_data) => { - 0u8.write(writer)?; - hop_data.write(writer)?; - }, - &HTLCSource::OutboundRoute { ref path, ref session_priv, ref first_hop_htlc_msat } => { - 1u8.write(writer)?; - path.write(writer)?; - session_priv.write(writer)?; - first_hop_htlc_msat.write(writer)?; - } - } - Ok(()) - } -} - -impl Readable for 
HTLCSource { - fn read(reader: &mut R) -> Result { - match ::read(reader)? { - 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), - 1 => Ok(HTLCSource::OutboundRoute { - path: Readable::read(reader)?, - session_priv: Readable::read(reader)?, - first_hop_htlc_msat: Readable::read(reader)?, - }), - _ => Err(DecodeError::InvalidValue), - } - } -} - -impl Writeable for HTLCFailReason { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - match self { - &HTLCFailReason::LightningError { ref err } => { - 0u8.write(writer)?; - err.write(writer)?; - }, - &HTLCFailReason::Reason { ref failure_code, ref data } => { - 1u8.write(writer)?; - failure_code.write(writer)?; - data.write(writer)?; - } - } - Ok(()) - } -} - -impl Readable for HTLCFailReason { - fn read(reader: &mut R) -> Result { - match ::read(reader)? { - 0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }), - 1 => Ok(HTLCFailReason::Reason { - failure_code: Readable::read(reader)?, - data: Readable::read(reader)?, - }), - _ => Err(DecodeError::InvalidValue), - } - } -} - -impl Writeable for HTLCForwardInfo { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - match self { - &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_funding_outpoint, ref prev_htlc_id, ref forward_info } => { - 0u8.write(writer)?; - prev_short_channel_id.write(writer)?; - prev_funding_outpoint.write(writer)?; - prev_htlc_id.write(writer)?; - forward_info.write(writer)?; - }, - &HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => { - 1u8.write(writer)?; - htlc_id.write(writer)?; - err_packet.write(writer)?; - }, - } - Ok(()) - } -} +impl_writeable_tlv_based_enum!(HTLCFailureMsg, ; + (0, Relay), + (1, Malformed), +); +impl_writeable_tlv_based_enum!(PendingHTLCStatus, ; + (0, Forward), + (1, Fail), +); + +impl_writeable_tlv_based!(HTLCPreviousHopData, { + (0, short_channel_id, required), + (2, outpoint, required), + (4, htlc_id, required), + (6, incoming_packet_shared_secret, required) +}); -impl Readable for HTLCForwardInfo { - fn read(reader: &mut R) -> Result { - match ::read(reader)? 
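The hand-written Writeable/Readable impls being deleted here are replaced, just below, by impl_writeable_tlv_based! macros. The sketch that follows only illustrates the type-length-value layout those macros are named for; the real encoding uses BigSize varints and even/odd type semantics, so the fixed one-byte type and two-byte length here are a simplification.

// Simplified TLV record writer: type, then length, then the raw value bytes.
fn write_tlv_record(out: &mut Vec<u8>, typ: u8, value: &[u8]) {
	out.push(typ);
	out.extend_from_slice(&(value.len() as u16).to_be_bytes());
	out.extend_from_slice(value);
}

fn main() {
	// Two fields of a hypothetical HTLCPreviousHopData-like struct, using the same
	// field numbering style as the macros below: (0, short_channel_id), (4, htlc_id).
	let mut buf = Vec::new();
	write_tlv_record(&mut buf, 0, &42u64.to_be_bytes());
	write_tlv_record(&mut buf, 4, &7u64.to_be_bytes());
	assert_eq!(buf.len(), 2 * (1 + 2 + 8));
}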
{ - 0 => Ok(HTLCForwardInfo::AddHTLC { - prev_short_channel_id: Readable::read(reader)?, - prev_funding_outpoint: Readable::read(reader)?, - prev_htlc_id: Readable::read(reader)?, - forward_info: Readable::read(reader)?, - }), - 1 => Ok(HTLCForwardInfo::FailHTLC { - htlc_id: Readable::read(reader)?, - err_packet: Readable::read(reader)?, - }), - _ => Err(DecodeError::InvalidValue), - } - } -} +impl_writeable_tlv_based!(ClaimableHTLC, { + (0, prev_hop, required), + (2, value, required), + (4, payment_data, required), + (6, cltv_expiry, required), +}); -impl_writeable!(PendingInboundPayment, 0, { - payment_secret, - expiry_time, - user_payment_id, - payment_preimage, - min_value_msat +impl_writeable_tlv_based_enum!(HTLCSource, + (0, OutboundRoute) => { + (0, session_priv, required), + (2, first_hop_htlc_msat, required), + (4, path, vec_type), + }, ; + (1, PreviousHopData) +); + +impl_writeable_tlv_based_enum!(HTLCFailReason, + (0, LightningError) => { + (0, err, required), + }, + (1, Reason) => { + (0, failure_code, required), + (2, data, vec_type), + }, +;); + +impl_writeable_tlv_based_enum!(HTLCForwardInfo, + (0, AddHTLC) => { + (0, forward_info, required), + (2, prev_short_channel_id, required), + (4, prev_htlc_id, required), + (6, prev_funding_outpoint, required), + }, + (1, FailHTLC) => { + (0, htlc_id, required), + (2, err_packet, required), + }, +;); + +impl_writeable_tlv_based!(PendingInboundPayment, { + (0, payment_secret, required), + (2, expiry_time, required), + (4, user_payment_id, required), + (6, payment_preimage, required), + (8, min_value_msat, required), }); impl Writeable for ChannelManager @@ -4649,7 +4570,7 @@ impl Writeable f session_priv.write(writer)?; } - write_tlv_fields!(writer, {}, {}); + write_tlv_fields!(writer, {}); Ok(()) } @@ -4796,6 +4717,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() || channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() { // If the channel is ahead of the monitor, return InvalidValue: + log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!"); + log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id()); + log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); + log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning"); return Err(DecodeError::InvalidValue); } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || @@ -4812,6 +4740,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> by_id.insert(channel.channel_id(), channel); } } else { + log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id())); + log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds."); + log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning"); return Err(DecodeError::InvalidValue); } } @@ -4894,7 +4827,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - read_tlv_fields!(reader, {}, {}); + read_tlv_fields!(reader, {}); let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes()); @@ -4954,6 +4887,9 @@ mod tests { use core::sync::atomic::{AtomicBool, Ordering}; use std::thread; use core::time::Duration; + use ln::functional_test_utils::*; + use ln::features::InitFeatures; + use ln::msgs::ChannelMessageHandler; #[test] fn test_wait_timeout() { @@ -4996,6 +4932,53 @@ mod tests { } } } + + #[test] + fn test_notify_limits() { + // Check that a few cases which don't require the persistence of a new ChannelManager, + // indeed, do not cause the persistence of a new ChannelManager. 
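The errors above spell out the chain::Watch contract: a ChannelMonitor must be written to stable storage before the call that produced it returns, otherwise a restart can find a ChannelManager that is ahead of its monitors and refuse to load (returning DecodeError::InvalidValue as shown). A minimal sketch of what "persisted durably before returning" means for whatever backs that API; the file layout and helper name are illustrative assumptions, not the library's persistence interface.

use std::fs::OpenOptions;
use std::io::Write;

// Write the serialized monitor and fsync before returning: once this returns Ok,
// a crash and restart can no longer observe a ChannelManager ahead of this monitor.
fn persist_monitor(path: &str, monitor_bytes: &[u8]) -> std::io::Result<()> {
	let mut file = OpenOptions::new().create(true).write(true).truncate(true).open(path)?;
	file.write_all(monitor_bytes)?;
	file.sync_all()?;
	Ok(())
}

fn main() -> std::io::Result<()> {
	persist_monitor("monitor_0.bin", b"serialized ChannelMonitor bytes go here")
}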
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + + // We check that the channel info nodes have doesn't change too early, even though we try + // to connect messages with new values + chan.0.contents.fee_base_msat *= 2; + chan.1.contents.fee_base_msat *= 2; + let node_a_chan_info = nodes[0].node.list_channels()[0].clone(); + let node_b_chan_info = nodes[1].node.list_channels()[0].clone(); + + // The first two nodes (which opened a channel) should now require fresh persistence + assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + // ... but the last node should not. + assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1))); + // After persisting the first two nodes they should no longer need fresh persistence. + assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + + // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update + // about the channel. + nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0); + nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1); + assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1))); + + // The nodes which are a party to the channel should also ignore messages from unrelated + // parties. + nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0); + nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1); + nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0); + nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1); + assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + + // At this point the channel info given by peers should still be the same. + assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info); + assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info); + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))] @@ -5018,7 +5001,7 @@ pub mod bench { use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::{Block, BlockHeader, Transaction, TxOut}; - use std::sync::Mutex; + use std::sync::{Arc, Mutex}; use test::Bencher; @@ -5044,7 +5027,7 @@ pub mod bench { let network = bitcoin::Network::Testnet; let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash(); - let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())}; + let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))}; let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; let mut config: UserConfig = Default::default();
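test_notify_limits above exercises await_persistable_update_timeout, which is how a background persister decides when a fresh ChannelManager write is actually needed. A sketch of that consumer loop under stated assumptions: the two closures stand in for ChannelManager::await_persistable_update_timeout and for the application's own storage, and the loop itself is illustrative rather than a lightning API.

use std::time::Duration;

// Intended to run on a dedicated persister thread: block until the manager reports a
// persistable update (true only when something changed, per NotifyOption::DoPersist),
// then write the ChannelManager out. Timeouts simply loop around.
fn run_persister<A, P>(await_persistable_update_timeout: A, persist_manager: P)
where
	A: Fn(Duration) -> bool,
	P: Fn(),
{
	loop {
		if await_persistable_update_timeout(Duration::from_millis(100)) {
			persist_manager();
		}
	}
}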