X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fgossip.rs;h=9c8fd40af1358f1e1c8ae04646607da68a4e7abe;hb=76fff953bd2cc86a932500036bb72a221ac31808;hp=3f9313c686aab4bd1454f8b8bda84f762afee36a;hpb=6ddf69c93b1c3e418251ed7a898efd943e47bc30;p=rust-lightning diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 3f9313c6..9c8fd40a 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -9,27 +9,27 @@ //! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers +use bitcoin::blockdata::constants::ChainHash; + use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; -use bitcoin::secp256k1::PublicKey; +use bitcoin::secp256k1::{PublicKey, Verification}; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; -use bitcoin::hash_types::BlockHash; - use bitcoin::network::constants::Network; -use bitcoin::blockdata::constants::genesis_block; +use crate::events::{MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::ChannelId; use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; -use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; +use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT}; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter}; use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use crate::ln::msgs; -use crate::routing::utxo::{self, UtxoLookup}; +use crate::routing::utxo::{self, UtxoLookup, UtxoResolver}; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable}; use crate::util::logger::{Logger, Level}; -use crate::util::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; use crate::util::string::PrintableString; use crate::util::indexed_map::{IndexedMap, Entry as IndexedMapEntry}; @@ -38,11 +38,13 @@ use crate::io; use crate::io_extras::{copy, sink}; use crate::prelude::*; use core::{cmp, fmt}; -use crate::sync::{RwLock, RwLockReadGuard}; +use core::convert::TryFrom; +use crate::sync::{RwLock, RwLockReadGuard, LockTestExt}; #[cfg(feature = "std")] use core::sync::atomic::{AtomicUsize, Ordering}; use crate::sync::Mutex; use core::ops::{Bound, Deref}; +use core::str::FromStr; #[cfg(feature = "std")] use std::time::{SystemTime, UNIX_EPOCH}; @@ -76,16 +78,26 @@ impl NodeId { pub fn as_slice(&self) -> &[u8] { &self.0 } + + /// Get the public key as an array from this NodeId + pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] { + &self.0 + } + + /// Get the public key from this NodeId + pub fn as_pubkey(&self) -> Result { + PublicKey::from_slice(&self.0) + } } impl fmt::Debug for NodeId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "NodeId({})", log_bytes!(self.0)) + write!(f, "NodeId({})", crate::util::logger::DebugBytes(&self.0)) } } impl fmt::Display for NodeId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", log_bytes!(self.0)) + crate::util::logger::DebugBytes(&self.0).fmt(f) } } @@ -130,11 +142,34 @@ impl Readable for NodeId { } } +impl From for NodeId { + fn from(pubkey: PublicKey) -> Self { + Self::from_pubkey(&pubkey) + } +} + +impl TryFrom for PublicKey { + type 
Error = secp256k1::Error; + + fn try_from(node_id: NodeId) -> Result { + node_id.as_pubkey() + } +} + +impl FromStr for NodeId { + type Err = hex::parse::HexToArrayError; + + fn from_str(s: &str) -> Result { + let data: [u8; PUBLIC_KEY_SIZE] = hex::FromHex::from_hex(s)?; + Ok(NodeId(data)) + } +} + /// Represents the network as nodes and channels between them pub struct NetworkGraph where L::Target: Logger { secp_ctx: Secp256k1, last_rapid_gossip_sync_timestamp: Mutex>, - genesis_hash: BlockHash, + chain_hash: ChainHash, logger: L, // Lock order: channels -> nodes channels: RwLock>, @@ -181,7 +216,7 @@ pub enum NetworkUpdate { msg: ChannelUpdate, }, /// An error indicating that a channel failed to route a payment, which should be applied via - /// [`NetworkGraph::channel_failed`]. + /// [`NetworkGraph::channel_failed_permanent`] if permanent. ChannelFailure { /// The short channel id of the closed channel. short_channel_id: u64, @@ -223,7 +258,7 @@ pub struct P2PGossipSync>, U: Deref, L: Deref> where U::Target: UtxoLookup, L::Target: Logger { network_graph: G, - utxo_lookup: Option, + utxo_lookup: RwLock>, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize, pending_events: Mutex>, @@ -234,7 +269,7 @@ impl>, U: Deref, L: Deref> P2PGossipSync, logger: L) -> Self { @@ -242,7 +277,7 @@ where U::Target: UtxoLookup, L::Target: Logger network_graph, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize::new(0), - utxo_lookup, + utxo_lookup: RwLock::new(utxo_lookup), pending_events: Mutex::new(vec![]), logger, } @@ -251,14 +286,14 @@ where U::Target: UtxoLookup, L::Target: Logger /// Adds a provider used to check new announcements. Does not affect /// existing announcements unless they are updated. /// Add, update or remove the provider would replace the current one. - pub fn add_utxo_lookup(&mut self, utxo_lookup: Option) { - self.utxo_lookup = utxo_lookup; + pub fn add_utxo_lookup(&self, utxo_lookup: Option) { + *self.utxo_lookup.write().unwrap() = utxo_lookup; } /// Gets a reference to the underlying [`NetworkGraph`] which was provided in /// [`P2PGossipSync::new`]. /// - /// (C-not exported) as bindings don't support a reference-to-a-reference yet + /// This is not exported to bindings users as bindings don't support a reference-to-a-reference yet pub fn network_graph(&self) -> &G { &self.network_graph } @@ -309,21 +344,24 @@ where U::Target: UtxoLookup, L::Target: Logger impl NetworkGraph where L::Target: Logger { /// Handles any network updates originating from [`Event`]s. + // + /// Note that this will skip applying any [`NetworkUpdate::ChannelUpdateMessage`] to avoid + /// leaking possibly identifying information of the sender to the public network. /// - /// [`Event`]: crate::util::events::Event + /// [`Event`]: crate::events::Event pub fn handle_network_update(&self, network_update: &NetworkUpdate) { match *network_update { NetworkUpdate::ChannelUpdateMessage { ref msg } => { let short_channel_id = msg.contents.short_channel_id; let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1); let status = if is_enabled { "enabled" } else { "disabled" }; - log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}.", short_channel_id, status); - let _ = self.update_channel(msg); + log_debug!(self.logger, "Skipping application of a channel update from a payment failure. 
Channel {} is {}.", short_channel_id, status); }, NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { - let action = if is_permanent { "Removing" } else { "Disabling" }; - log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", action, short_channel_id); - self.channel_failed(short_channel_id, is_permanent); + if is_permanent { + log_debug!(self.logger, "Removing channel graph entry for {} due to a payment failure.", short_channel_id); + self.channel_failed_permanent(short_channel_id); + } }, NetworkUpdate::NodeFailure { ref node_id, is_permanent } => { if is_permanent { @@ -334,6 +372,11 @@ impl NetworkGraph where L::Target: Logger { }, } } + + /// Gets the chain hash for this network graph. + pub fn get_chain_hash(&self) -> ChainHash { + self.chain_hash + } } macro_rules! secp_verify_sig { @@ -345,7 +388,7 @@ macro_rules! secp_verify_sig { err: format!("Invalid signature on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid signature on {} message", $msg_type), }, log_level: Level::Trace, @@ -363,7 +406,7 @@ macro_rules! get_pubkey_from_node_id { err: format!("Invalid public key on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid public key on {} message", $msg_type), }, log_level: Level::Trace @@ -372,6 +415,35 @@ macro_rules! get_pubkey_from_node_id { } } +fn message_sha256d_hash(msg: &M) -> Sha256dHash { + let mut engine = Sha256dHash::engine(); + msg.write(&mut engine).expect("In-memory structs should not fail to serialize"); + Sha256dHash::from_engine(engine) +} + +/// Verifies the signature of a [`NodeAnnouncement`]. +/// +/// Returns an error if it is invalid. +pub fn verify_node_announcement(msg: &NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); + + Ok(()) +} + +/// Verifies all signatures included in a [`ChannelAnnouncement`]. +/// +/// Returns an error if one of the signatures is invalid. 
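// A minimal usage sketch for these standalone verifiers (assumes `node_ann` and
// `chan_ann` are a received `NodeAnnouncement` and `ChannelAnnouncement`, and that
// the caller returns `Result<(), LightningError>`):
//
//     let secp_ctx = Secp256k1::verification_only();
//     verify_node_announcement(&node_ann, &secp_ctx)?;
//     verify_channel_announcement(&chan_ann, &secp_ctx)?;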
+pub fn verify_channel_announcement(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement"); + + Ok(()) +} + impl>, U: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync where U::Target: UtxoLookup, L::Target: Logger { @@ -383,7 +455,7 @@ where U::Target: UtxoLookup, L::Target: Logger } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { - self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?; + self.network_graph.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } @@ -432,14 +504,20 @@ where U::Target: UtxoLookup, L::Target: Logger } /// Initiates a stateless sync of routing gossip information with a peer - /// using gossip_queries. The default strategy used by this implementation + /// using [`gossip_queries`]. The default strategy used by this implementation /// is to sync the full block range with several peers. /// - /// We should expect one or more reply_channel_range messages in response - /// to our query_channel_range. Each reply will enqueue a query_scid message + /// We should expect one or more [`reply_channel_range`] messages in response + /// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message /// to request gossip messages for each channel. The sync is considered complete - /// when the final reply_scids_end message is received, though we are not + /// when the final [`reply_scids_end`] message is received, though we are not /// tracking this directly. + /// + /// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages + /// [`reply_channel_range`]: msgs::ReplyChannelRange + /// [`query_channel_range`]: msgs::QueryChannelRange + /// [`query_scid`]: msgs::QueryShortChannelIds + /// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> { // We will only perform a sync with peers that support gossip_queries. if !init_msg.features.supports_gossip_queries() { @@ -514,7 +592,7 @@ where U::Target: UtxoLookup, L::Target: Logger pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), msg: GossipTimestampFilter { - chain_hash: self.network_graph.genesis_hash, + chain_hash: self.network_graph.chain_hash, first_timestamp: gossip_start_time as u32, // 2106 issue! timestamp_range: u32::max_value(), }, @@ -553,7 +631,7 @@ where U::Target: UtxoLookup, L::Target: Logger let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0); // Per spec, we must reply to a query. Send an empty message when things are invalid. 
- if msg.chain_hash != self.network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { + if msg.chain_hash != self.network_graph.chain_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@ -792,31 +870,31 @@ impl ChannelInfo { /// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a /// returned `source`, or `None` if `target` is not one of the channel's counterparties. pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, source) = { + let (direction, source, outbound) = { if target == &self.node_one { - (self.two_to_one.as_ref(), &self.node_two) + (self.two_to_one.as_ref(), &self.node_two, false) } else if target == &self.node_two { - (self.one_to_two.as_ref(), &self.node_one) + (self.one_to_two.as_ref(), &self.node_one, true) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), source)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), source)) } /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a /// returned `target`, or `None` if `source` is not one of the channel's counterparties. pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, target) = { + let (direction, target, outbound) = { if source == &self.node_one { - (self.one_to_two.as_ref(), &self.node_two) + (self.one_to_two.as_ref(), &self.node_two, true) } else if source == &self.node_two { - (self.two_to_one.as_ref(), &self.node_one) + (self.two_to_one.as_ref(), &self.node_one, false) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), target)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), target)) } /// Returns a [`ChannelUpdateInfo`] based on the direction implied by the channel_flag. @@ -833,7 +911,7 @@ impl ChannelInfo { impl fmt::Display for ChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}", - log_bytes!(self.features.encode()), log_bytes!(self.node_one.as_slice()), self.one_to_two, log_bytes!(self.node_two.as_slice()), self.two_to_one)?; + log_bytes!(self.features.encode()), &self.node_one, self.one_to_two, &self.node_two, self.two_to_one)?; Ok(()) } } @@ -912,51 +990,55 @@ impl Readable for ChannelInfo { pub struct DirectedChannelInfo<'a> { channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, - htlc_maximum_msat: u64, - effective_capacity: EffectiveCapacity, + /// The direction this channel is in - if set, it indicates that we're traversing the channel + /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`]. 
+ from_node_one: bool, } impl<'a> DirectedChannelInfo<'a> { #[inline] - fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo) -> Self { - let mut htlc_maximum_msat = direction.htlc_maximum_msat; - let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); - - let effective_capacity = match capacity_msat { - Some(capacity_msat) => { - htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); - EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: htlc_maximum_msat } - }, - None => EffectiveCapacity::MaximumHTLC { amount_msat: htlc_maximum_msat }, - }; - - Self { - channel, direction, htlc_maximum_msat, effective_capacity - } + fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self { + Self { channel, direction, from_node_one } } /// Returns information for the channel. #[inline] pub fn channel(&self) -> &'a ChannelInfo { self.channel } - /// Returns the maximum HTLC amount allowed over the channel in the direction. - #[inline] - pub fn htlc_maximum_msat(&self) -> u64 { - self.htlc_maximum_msat - } - /// Returns the [`EffectiveCapacity`] of the channel in the direction. /// /// This is either the total capacity from the funding transaction, if known, or the /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known, /// otherwise. + #[inline] pub fn effective_capacity(&self) -> EffectiveCapacity { - self.effective_capacity + let mut htlc_maximum_msat = self.direction().htlc_maximum_msat; + let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); + + match capacity_msat { + Some(capacity_msat) => { + htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); + EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } + }, + None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat }, + } } /// Returns information for the direction. #[inline] pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.direction } + + /// Returns the `node_id` of the source hop. + /// + /// Refers to the `node_id` forwarding the payment to the next hop. + #[inline] + pub(super) fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } } + + /// Returns the `node_id` of the target hop. + /// + /// Refers to the `node_id` receiving the payment from the previous hop. + #[inline] + pub(super) fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } } } impl<'a> fmt::Debug for DirectedChannelInfo<'a> { @@ -981,7 +1063,7 @@ pub enum EffectiveCapacity { liquidity_msat: u64, }, /// The maximum HTLC amount in one direction as advertised on the gossip network. - MaximumHTLC { + AdvertisedMaxHTLC { /// The maximum HTLC amount denominated in millisatoshi. amount_msat: u64, }, @@ -995,6 +1077,11 @@ pub enum EffectiveCapacity { /// A capacity sufficient to route any payment, typically used for private channels provided by /// an invoice. Infinite, + /// The maximum HTLC amount as provided by an invoice route hint. + HintMaxHTLC { + /// The maximum HTLC amount denominated in millisatoshi. + amount_msat: u64, + }, /// A capacity that is unknown possibly because either the chain state is unavailable to know /// the total capacity or the `htlc_maximum_msat` was not advertised on the gossip network. 
Unknown, @@ -1009,8 +1096,9 @@ impl EffectiveCapacity { pub fn as_msat(&self) -> u64 { match self { EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat, - EffectiveCapacity::MaximumHTLC { amount_msat } => *amount_msat, + EffectiveCapacity::AdvertisedMaxHTLC { amount_msat } => *amount_msat, EffectiveCapacity::Total { capacity_msat, .. } => *capacity_msat, + EffectiveCapacity::HintMaxHTLC { amount_msat } => *amount_msat, EffectiveCapacity::Infinite => u64::max_value(), EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT, } @@ -1018,7 +1106,7 @@ impl EffectiveCapacity { } /// Fees for routing via a given channel or a node -#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] +#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash, Ord, PartialOrd)] pub struct RoutingFees { /// Flat routing fee in millisatoshis. pub base_msat: u32, @@ -1046,8 +1134,6 @@ pub struct NodeAnnouncementInfo { /// May be invalid or malicious (eg control chars), /// should not be exposed to the user. pub alias: NodeAlias, - /// Internet-level addresses via which one can connect to the node - pub addresses: Vec, /// An initial announcement of the node /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. @@ -1055,20 +1141,51 @@ pub struct NodeAnnouncementInfo { pub announcement_message: Option } -impl_writeable_tlv_based!(NodeAnnouncementInfo, { - (0, features, required), - (2, last_update, required), - (4, rgb, required), - (6, alias, required), - (8, announcement_message, option), - (10, addresses, vec_type), -}); +impl NodeAnnouncementInfo { + /// Internet-level addresses via which one can connect to the node + pub fn addresses(&self) -> &[SocketAddress] { + self.announcement_message.as_ref() + .map(|msg| msg.contents.addresses.as_slice()) + .unwrap_or_default() + } +} + +impl Writeable for NodeAnnouncementInfo { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + let empty_addresses = Vec::::new(); + write_tlv_fields!(writer, { + (0, self.features, required), + (2, self.last_update, required), + (4, self.rgb, required), + (6, self.alias, required), + (8, self.announcement_message, option), + (10, empty_addresses, required_vec), // Versions prior to 0.0.115 require this field + }); + Ok(()) + } +} + +impl Readable for NodeAnnouncementInfo { + fn read(reader: &mut R) -> Result { + _init_and_read_len_prefixed_tlv_fields!(reader, { + (0, features, required), + (2, last_update, required), + (4, rgb, required), + (6, alias, required), + (8, announcement_message, option), + (10, _addresses, optional_vec), // deprecated, not used anymore + }); + let _: Option> = _addresses; + Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(), + alias: alias.0.unwrap(), announcement_message }) + } +} /// A user-defined name for a node, which may be used when displaying the node in a graph. /// /// Since node aliases are provided by third parties, they are a potential avenue for injection /// attacks. Care must be taken when processing. 
-#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub struct NodeAlias(pub [u8; 32]); impl fmt::Display for NodeAlias { @@ -1127,14 +1244,14 @@ impl Writeable for NodeInfo { write_tlv_fields!(writer, { // Note that older versions of LDK wrote the lowest inbound fees here at type 0 (2, self.announcement_info, option), - (4, self.channels, vec_type), + (4, self.channels, required_vec), }); Ok(()) } } -// A wrapper allowing for the optional deseralization of `NodeAnnouncementInfo`. Utilizing this is -// necessary to maintain compatibility with previous serializations of `NetAddress` that have an +// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is +// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an // invalid hostname set. We ignore and eat all errors until we are either able to read a // `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end. struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo); @@ -1158,19 +1275,17 @@ impl Readable for NodeInfo { // with zero inbound fees, causing that heuristic to provide little gain. Worse, because it // requires additional complexity and lookups during routing, it ends up being a // performance loss. Thus, we simply ignore the old field here and no longer track it. - let mut _lowest_inbound_channel_fees: Option = None; - let mut announcement_info_wrap: Option = None; - _init_tlv_field_var!(channels, vec_type); - - read_tlv_fields!(reader, { + _init_and_read_len_prefixed_tlv_fields!(reader, { (0, _lowest_inbound_channel_fees, option), (2, announcement_info_wrap, upgradable_option), - (4, channels, vec_type), + (4, channels, required_vec), }); + let _: Option = _lowest_inbound_channel_fees; + let announcement_info_wrap: Option = announcement_info_wrap; Ok(NodeInfo { announcement_info: announcement_info_wrap.map(|w| w.0), - channels: _init_tlv_based_struct_field!(channels, vec_type), + channels, }) } } @@ -1182,7 +1297,7 @@ impl Writeable for NetworkGraph where L::Target: Logger { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); - self.genesis_hash.write(writer)?; + self.chain_hash.write(writer)?; let channels = self.channels.read().unwrap(); (channels.len() as u64).write(writer)?; for (ref chan_id, ref chan_info) in channels.unordered_iter() { @@ -1208,16 +1323,18 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - let genesis_hash: BlockHash = Readable::read(reader)?; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::new(); + // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. + let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; - let mut nodes = IndexedMap::new(); + // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. 
+ let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); for _ in 0..nodes_count { let node_id = Readable::read(reader)?; let node_info = Readable::read(reader)?; @@ -1231,7 +1348,7 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { Ok(NetworkGraph { secp_ctx: Secp256k1::verification_only(), - genesis_hash, + chain_hash, logger, channels: RwLock::new(channels), nodes: RwLock::new(nodes), @@ -1251,7 +1368,7 @@ impl fmt::Display for NetworkGraph where L::Target: Logger { } writeln!(f, "[Nodes]")?; for (&node_id, val) in self.nodes.read().unwrap().unordered_iter() { - writeln!(f, " {}: {}", log_bytes!(node_id.as_slice()), val)?; + writeln!(f, " {}: {}", &node_id, val)?; } Ok(()) } @@ -1260,9 +1377,14 @@ impl fmt::Display for NetworkGraph where L::Target: Logger { impl Eq for NetworkGraph where L::Target: Logger {} impl PartialEq for NetworkGraph where L::Target: Logger { fn eq(&self, other: &Self) -> bool { - self.genesis_hash == other.genesis_hash && - *self.channels.read().unwrap() == *other.channels.read().unwrap() && - *self.nodes.read().unwrap() == *other.nodes.read().unwrap() + // For a total lockorder, sort by position in memory and take the inner locks in that order. + // (Assumes that we can't move within memory while a lock is held). + let ord = ((self as *const _) as usize) < ((other as *const _) as usize); + let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) }; + let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) }; + let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self()); + let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self()); + self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b) } } @@ -1271,7 +1393,7 @@ impl NetworkGraph where L::Target: Logger { pub fn new(network: Network, logger: L) -> NetworkGraph { Self { secp_ctx: Secp256k1::verification_only(), - genesis_hash: genesis_block(network).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(network), logger, channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), @@ -1320,8 +1442,7 @@ impl NetworkGraph where L::Target: Logger { /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); + verify_node_announcement(msg, &self.secp_ctx)?; self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) } @@ -1361,8 +1482,7 @@ impl NetworkGraph where L::Target: Logger { features: msg.features.clone(), last_update: msg.timestamp, rgb: msg.rgb, - alias: NodeAlias(msg.alias), - addresses: msg.addresses.clone(), + alias: msg.alias, announcement_message: if should_relay { full_msg.cloned() } else { None }, }); @@ -1373,8 +1493,8 @@ impl NetworkGraph where L::Target: Logger { /// Store or update channel info from a channel announcement. 
/// - /// You probably don't want to call this directly, instead relying on a P2PGossipSync's - /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. /// /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify @@ -1385,14 +1505,23 @@ impl NetworkGraph where L::Target: Logger { where U::Target: UtxoLookup, { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement"); + verify_channel_announcement(msg, &self.secp_ctx)?; self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup) } + /// Store or update channel info from a channel announcement. + /// + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept + /// routing messages from a source using a protocol other than the lightning P2P protocol. + /// + /// This will skip verification of if the channel is actually on-chain. + pub fn update_channel_from_announcement_no_lookup( + &self, msg: &ChannelAnnouncement + ) -> Result<(), LightningError> { + self.update_channel_from_announcement::<&UtxoResolver>(msg, &None) + } + /// Store or update channel info from a channel announcement without verifying the associated /// signatures. Because we aren't given the associated signatures here we cannot relay the /// channel announcement to any of our peers. @@ -1442,6 +1571,8 @@ impl NetworkGraph where L::Target: Logger { let node_id_a = channel_info.node_one.clone(); let node_id_b = channel_info.node_two.clone(); + log_gossip!(self.logger, "Adding channel {} between nodes {} and {}", short_channel_id, node_id_a, node_id_b); + match channels.entry(short_channel_id) { IndexedMapEntry::Occupied(mut entry) => { //TODO: because asking the blockchain if short_channel_id is valid is only optional @@ -1494,6 +1625,13 @@ impl NetworkGraph where L::Target: Logger { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } + if msg.chain_hash != self.chain_hash { + return Err(LightningError { + err: "Channel announcement chain hash does not match genesis hash".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } + { let channels = self.channels.read().unwrap(); @@ -1567,40 +1705,27 @@ impl NetworkGraph where L::Target: Logger { Ok(()) } - /// Marks a channel in the graph as failed if a corresponding HTLC fail was sent. 
- /// If permanent, removes a channel from the local storage. - /// May cause the removal of nodes too, if this was their last channel. - /// If not permanent, makes channels unavailable for routing. - pub fn channel_failed(&self, short_channel_id: u64, is_permanent: bool) { + /// Marks a channel in the graph as failed permanently. + /// + /// The channel and any node for which this was their last channel are removed from the graph. + pub fn channel_failed_permanent(&self, short_channel_id: u64) { #[cfg(feature = "std")] let current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs()); #[cfg(not(feature = "std"))] let current_time_unix = None; - self.channel_failed_with_time(short_channel_id, is_permanent, current_time_unix) + self.channel_failed_permanent_with_time(short_channel_id, current_time_unix) } - /// Marks a channel in the graph as failed if a corresponding HTLC fail was sent. - /// If permanent, removes a channel from the local storage. - /// May cause the removal of nodes too, if this was their last channel. - /// If not permanent, makes channels unavailable for routing. - fn channel_failed_with_time(&self, short_channel_id: u64, is_permanent: bool, current_time_unix: Option) { + /// Marks a channel in the graph as failed permanently. + /// + /// The channel and any node for which this was their last channel are removed from the graph. + fn channel_failed_permanent_with_time(&self, short_channel_id: u64, current_time_unix: Option) { let mut channels = self.channels.write().unwrap(); - if is_permanent { - if let Some(chan) = channels.remove(&short_channel_id) { - let mut nodes = self.nodes.write().unwrap(); - self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix); - Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); - } - } else { - if let Some(chan) = channels.get_mut(&short_channel_id) { - if let Some(one_to_two) = chan.one_to_two.as_mut() { - one_to_two.enabled = false; - } - if let Some(two_to_one) = chan.two_to_one.as_mut() { - two_to_one.enabled = false; - } - } + if let Some(chan) = channels.remove(&short_channel_id) { + let mut nodes = self.nodes.write().unwrap(); + self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix); + Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); } } @@ -1683,16 +1808,23 @@ impl NetworkGraph where L::Target: Logger { let mut scids_to_remove = Vec::new(); for (scid, info) in channels.unordered_iter_mut() { if info.one_to_two.is_some() && info.one_to_two.as_ref().unwrap().last_update < min_time_unix { + log_gossip!(self.logger, "Removing directional update one_to_two (0) for channel {} due to its timestamp {} being below {}", + scid, info.one_to_two.as_ref().unwrap().last_update, min_time_unix); info.one_to_two = None; } if info.two_to_one.is_some() && info.two_to_one.as_ref().unwrap().last_update < min_time_unix { + log_gossip!(self.logger, "Removing directional update two_to_one (1) for channel {} due to its timestamp {} being below {}", + scid, info.two_to_one.as_ref().unwrap().last_update, min_time_unix); info.two_to_one = None; } if info.one_to_two.is_none() || info.two_to_one.is_none() { // We check the announcement_received_time here to ensure we don't drop // announcements that we just received and are just waiting for our peer to send a // channel_update for. 
- if info.announcement_received_time < min_time_unix as u64 { + let announcement_received_timestamp = info.announcement_received_time; + if announcement_received_timestamp < min_time_unix as u64 { + log_gossip!(self.logger, "Removing channel {} because both directional updates are missing and its announcement timestamp {} being below {}", + scid, announcement_received_timestamp, min_time_unix); scids_to_remove.push(*scid); } } @@ -1713,7 +1845,7 @@ impl NetworkGraph where L::Target: Logger { // NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal, // so we'll just set the removal time here to the current UNIX time on the very next invocation // of this function. - #[cfg(feature = "no-std")] + #[cfg(not(feature = "std"))] { let mut tracked_time = Some(current_time_unix); core::mem::swap(time, &mut tracked_time); @@ -1730,14 +1862,14 @@ impl NetworkGraph where L::Target: Logger { /// For an already known (from announcement) channel, update info about one of the directions /// of the channel. /// - /// You probably don't want to call this directly, instead relying on a P2PGossipSync's - /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. /// /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(&msg.contents, Some(&msg), Some(&msg.signature)) + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false) } /// For an already known (from announcement) channel, update info about one of the directions @@ -1747,12 +1879,32 @@ impl NetworkGraph where L::Target: Logger { /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(msg, None, None) + self.update_channel_internal(msg, None, None, false) } - fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>) -> Result<(), LightningError> { + /// For an already known (from announcement) channel, verify the given [`ChannelUpdate`]. + /// + /// This checks whether the update currently is applicable by [`Self::update_channel`]. + /// + /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or + /// materially in the future will be rejected. 
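// Sketch of the check-only flow relative to [`Self::update_channel`] (assumes a
// `graph: NetworkGraph<L>` and a received `upd: ChannelUpdate` are in scope):
//
//     graph.verify_channel_update(&upd)?; // performs the same checks but leaves the graph unchanged
//     graph.update_channel(&upd)?;        // performs the checks and applies the update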
+ pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true) + } + + fn update_channel_internal(&self, msg: &msgs::UnsignedChannelUpdate, + full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>, + only_verify: bool) -> Result<(), LightningError> + { let chan_enabled = msg.flags & (1 << 1) != (1 << 1); + if msg.chain_hash != self.chain_hash { + return Err(LightningError { + err: "Channel update chain hash does not match genesis hash".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } + #[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))] { // Note that many tests rely on being able to set arbitrarily old timestamps, thus we @@ -1766,6 +1918,8 @@ impl NetworkGraph where L::Target: Logger { } } + log_gossip!(self.logger, "Updating channel {} in direction {} with timestamp {}", msg.short_channel_id, msg.flags & 1, msg.timestamp); + let mut channels = self.channels.write().unwrap(); match channels.get_mut(&msg.short_channel_id) { None => { @@ -1828,7 +1982,7 @@ impl NetworkGraph where L::Target: Logger { } } } - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if msg.flags & 1 == 1 { check_update_latest!(channel.two_to_one); if let Some(sig) = sig { @@ -1837,7 +1991,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.two_to_one = get_new_channel_info!(); + if !only_verify { + channel.two_to_one = get_new_channel_info!(); + } } else { check_update_latest!(channel.one_to_two); if let Some(sig) = sig { @@ -1846,7 +2002,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.one_to_two = get_new_channel_info!(); + if !only_verify { + channel.one_to_two = get_new_channel_info!(); + } } } } @@ -1878,7 +2036,7 @@ impl NetworkGraph where L::Target: Logger { impl ReadOnlyNetworkGraph<'_> { /// Returns all known valid channels' short ids along with announced channel info. /// - /// (C-not exported) because we don't want to return lifetime'd references + /// This is not exported to bindings users because we don't want to return lifetime'd references pub fn channels(&self) -> &IndexedMap { &*self.channels } @@ -1896,7 +2054,7 @@ impl ReadOnlyNetworkGraph<'_> { /// Returns all known nodes' public keys along with announced node info. /// - /// (C-not exported) because we don't want to return lifetime'd references + /// This is not exported to bindings users because we don't want to return lifetime'd references pub fn nodes(&self) -> &IndexedMap { &*self.nodes } @@ -1915,18 +2073,15 @@ impl ReadOnlyNetworkGraph<'_> { /// Get network addresses by node id. /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. 
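// Rough call-site sketch (assumes a `graph: NetworkGraph<L>` and a peer's `pubkey`):
//
//     let addrs = graph.read_only().get_addresses(&pubkey).unwrap_or_default();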
- pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { - if let Some(node) = self.nodes.get(&NodeId::from_pubkey(&pubkey)) { - if let Some(node_info) = node.announcement_info.as_ref() { - return Some(node_info.addresses.clone()) - } - } - None + pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { + self.nodes.get(&NodeId::from_pubkey(&pubkey)) + .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec())) } } #[cfg(test)] pub(crate) mod tests { + use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::channelmanager; use crate::ln::chan_utils::make_funding_redeemscript; #[cfg(feature = "std")] @@ -1938,8 +2093,7 @@ pub(crate) mod tests { ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use crate::util::config::UserConfig; use crate::util::test_utils; - use crate::util::ser::{ReadableArgs, Writeable}; - use crate::util::events::{MessageSendEvent, MessageSendEventsProvider}; + use crate::util::ser::{ReadableArgs, Readable, Writeable}; use crate::util::scid_utils::scid_from_parts; use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS; @@ -1947,13 +2101,11 @@ pub(crate) mod tests { use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; + use bitcoin::hashes::hex::FromHex; use bitcoin::network::constants::Network; - use bitcoin::blockdata::constants::genesis_block; - use bitcoin::blockdata::script::Script; + use bitcoin::blockdata::constants::ChainHash; + use bitcoin::blockdata::script::ScriptBuf; use bitcoin::blockdata::transaction::TxOut; - - use hex; - use bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; @@ -1982,7 +2134,7 @@ pub(crate) mod tests { fn request_full_sync_finite_times() { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); + let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(gossip_sync.should_request_full_sync(&node_id)); assert!(gossip_sync.should_request_full_sync(&node_id)); @@ -1999,7 +2151,7 @@ pub(crate) mod tests { timestamp: 100, node_id, rgb: [0; 3], - alias: [0; 32], + alias: NodeAlias([0; 32]), addresses: Vec::new(), excess_address_data: Vec::new(), excess_data: Vec::new(), @@ -2020,7 +2172,7 @@ pub(crate) mod tests { let mut unsigned_announcement = UnsignedChannelAnnouncement { features: channelmanager::provided_channel_features(&UserConfig::default()), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, node_id_1: NodeId::from_pubkey(&node_id_1), node_id_2: NodeId::from_pubkey(&node_id_2), @@ -2039,7 +2191,7 @@ pub(crate) mod tests { } } - pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> Script { + pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> ScriptBuf { let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), @@ -2048,7 +2200,7 @@ pub(crate) mod tests { pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate 
{ let mut unsigned_channel_update = UnsignedChannelUpdate { - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, timestamp: 100, flags: 0, @@ -2096,7 +2248,7 @@ pub(crate) mod tests { Err(_) => panic!() }; - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); match gossip_sync.handle_node_announcement( &NodeAnnouncement { signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey), @@ -2249,6 +2401,16 @@ pub(crate) mod tests { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself") }; + + // Test that channel announcements with the wrong chain hash are ignored (network graph is testnet, + // announcement is mainnet). + let incorrect_chain_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); + }, node_1_privkey, node_2_privkey, &secp_ctx); + match gossip_sync.handle_channel_announcement(&incorrect_chain_announcement) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash") + }; } #[test] @@ -2281,6 +2443,7 @@ pub(crate) mod tests { } let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); + network_graph.verify_channel_update(&valid_channel_update).unwrap(); match gossip_sync.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(res), _ => panic!(), @@ -2347,12 +2510,23 @@ pub(crate) mod tests { unsigned_channel_update.timestamp += 500; }, node_1_privkey, &secp_ctx); let zero_hash = Sha256dHash::hash(&[0; 32]); - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey); match gossip_sync.handle_channel_update(&invalid_sig_channel_update) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message") }; + + // Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel + // update is mainet). + let incorrect_chain_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); + }, node_1_privkey, &secp_ctx); + + match gossip_sync.handle_channel_update(&incorrect_chain_update) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash") + }; } #[test] @@ -2372,7 +2546,8 @@ pub(crate) mod tests { let short_channel_id; { - // Announce a channel we will update + // Check we won't apply an update via `handle_network_update` for privacy reasons, but + // can continue fine if we manually apply it. 
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; @@ -2383,13 +2558,14 @@ pub(crate) mod tests { assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage { - msg: valid_channel_update, + msg: valid_channel_update.clone(), }); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + network_graph.update_channel(&valid_channel_update).unwrap(); } - // Non-permanent closing just disables a channel + // Non-permanent failure doesn't touch the channel at all { match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), @@ -2406,7 +2582,7 @@ pub(crate) mod tests { match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { - assert!(!channel_info.one_to_two.as_ref().unwrap().enabled); + assert!(channel_info.one_to_two.as_ref().unwrap().enabled); } }; } @@ -2540,7 +2716,7 @@ pub(crate) mod tests { // Mark the channel as permanently failed. This will also remove the two nodes // and all of the entries will be tracked as removed. - network_graph.channel_failed_with_time(short_channel_id, true, Some(tracking_time)); + network_graph.channel_failed_permanent_with_time(short_channel_id, Some(tracking_time)); // Should not remove from tracking if insufficient time has passed network_graph.remove_stale_channels_and_tracking_with_time( @@ -2573,7 +2749,7 @@ pub(crate) mod tests { // Mark the channel as permanently failed. This will also remove the two nodes // and all of the entries will be tracked as removed. - network_graph.channel_failed(short_channel_id, true); + network_graph.channel_failed_permanent(short_channel_id); // The first time we call the following, the channel will have a removal time assigned. 
network_graph.remove_stale_channels_and_tracking_with_time(removal_time); @@ -2784,11 +2960,11 @@ pub(crate) mod tests { let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); // It should ignore if gossip_queries feature is not enabled { - let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None }; + let init_msg = Init { features: InitFeatures::empty(), networks: None, remote_network_address: None }; gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); @@ -2798,7 +2974,7 @@ pub(crate) mod tests { { let mut features = InitFeatures::empty(); features.set_gossip_queries_optional(); - let init_msg = Init { features, remote_network_address: None }; + let init_msg = Init { features, networks: None, remote_network_address: None }; gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2821,7 +2997,7 @@ pub(crate) mod tests { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); @@ -2873,13 +3049,13 @@ pub(crate) mod tests { &gossip_sync, &node_id_2, QueryChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, }, false, vec![ReplyChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, sync_complete: true, @@ -3114,7 +3290,7 @@ pub(crate) mod tests { let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let result = gossip_sync.handle_query_short_channel_ids(&node_id, QueryShortChannelIds { chain_hash, @@ -3173,16 +3349,16 @@ pub(crate) mod tests { assert_eq!(chan_update_info, read_chan_update_info); // Check the serialization hasn't changed. - let legacy_chan_update_info_with_some: Vec = hex::decode("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_some: Vec = >::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some); // Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself // or the ChannelUpdate enclosed with `last_update_message`. 
- let legacy_chan_update_info_with_some_and_fail_update: Vec = hex::decode("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); + let legacy_chan_update_info_with_some_and_fail_update: Vec = >::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_some_and_fail_update.as_slice()); assert!(read_chan_update_info_res.is_err()); - let legacy_chan_update_info_with_none: Vec = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_none: Vec = >::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice()); assert!(read_chan_update_info_res.is_err()); @@ -3224,18 +3400,18 @@ pub(crate) mod tests { assert_eq!(chan_info_some_updates, read_chan_info); // Check the serialization hasn't changed. - let legacy_chan_info_with_some: Vec = hex::decode("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_some: Vec = >::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); assert_eq!(encoded_chan_info, legacy_chan_info_with_some); // Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` / // `last_update_message` fields fail to decode due to missing htlc_maximum_msat. 
-		let legacy_chan_info_with_some_and_fail_update = hex::decode("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
+		let legacy_chan_info_with_some_and_fail_update = <Vec<u8>>::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
 		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_some_and_fail_update.as_slice()).unwrap();
 		assert_eq!(read_chan_info.announcement_received_time, 87654);
 		assert_eq!(read_chan_info.one_to_two, None);
 		assert_eq!(read_chan_info.two_to_one, None);
 
-		let legacy_chan_info_with_none: Vec<u8> = hex::decode("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
+		let legacy_chan_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
 		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
 		assert_eq!(read_chan_info.announcement_received_time, 87654);
 		assert_eq!(read_chan_info.one_to_two, None);
@@ -3244,26 +3420,25 @@ pub(crate) mod tests {
 	#[test]
 	fn node_info_is_readable() {
-		use std::convert::TryFrom;
-
 		// 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one
-		let valid_netaddr = crate::ln::msgs::NetAddress::Hostname { hostname: crate::util::ser::Hostname::try_from("A".to_string()).unwrap(), port: 1234 };
+		let announcement_message = <Vec<u8>>::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap();
+		let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap();
 		let valid_node_ann_info = NodeAnnouncementInfo {
 			features: channelmanager::provided_node_features(&UserConfig::default()),
 			last_update: 0,
 			rgb: [0u8; 3],
 			alias: NodeAlias([0u8; 32]),
-			addresses: vec![valid_netaddr],
-			announcement_message: None,
+			announcement_message: Some(announcement_message)
 		};
 
 		let mut encoded_valid_node_ann_info = Vec::new();
 		assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
-		let read_valid_node_ann_info: NodeAnnouncementInfo = crate::util::ser::Readable::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
+		let read_valid_node_ann_info = NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
 		assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
+		assert_eq!(read_valid_node_ann_info.addresses().len(), 1);
 
-		let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
-		let read_invalid_node_ann_info_res: Result<NodeAnnouncementInfo, crate::ln::msgs::DecodeError> = crate::util::ser::Readable::read(&mut encoded_invalid_node_ann_info.as_slice());
+		let encoded_invalid_node_ann_info = <Vec<u8>>::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
+		let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
 		assert!(read_invalid_node_ann_info_res.is_err());
 
 		// 2. Check we can read a NodeInfo anyways, but set the NodeAnnouncementInfo to None if invalid
@@ -3274,40 +3449,52 @@ pub(crate) mod tests {
 		let mut encoded_valid_node_info = Vec::new();
 		assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok());
-		let read_valid_node_info: NodeInfo = crate::util::ser::Readable::read(&mut encoded_valid_node_info.as_slice()).unwrap();
+		let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap();
 		assert_eq!(read_valid_node_info, valid_node_info);
 
-		let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
-		let read_invalid_node_info: NodeInfo = crate::util::ser::Readable::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
+		let encoded_invalid_node_info_hex = <Vec<u8>>::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
+		let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
 		assert_eq!(read_invalid_node_info.announcement_info, None);
 	}
+
+	#[test]
+	fn test_node_info_keeps_compatibility() {
+		let old_ann_info_with_addresses = <Vec<u8>>::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap();
+		let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
+			.expect("to be able to read an old NodeAnnouncementInfo with addresses");
+		// This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty
+		assert!(ann_info_with_addresses.addresses().is_empty());
+	}
+
+	#[test]
+	fn test_node_id_display() {
+		let node_id = NodeId([42; 33]);
+		assert_eq!(format!("{}", &node_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
+	}
 }
 
-#[cfg(all(test, feature = "_bench_unstable"))]
-mod benches {
+#[cfg(ldk_bench)]
+pub mod benches {
 	use super::*;
-
-	use test::Bencher;
 	use std::io::Read;
+	use criterion::{black_box, Criterion};
 
-	#[bench]
-	fn read_network_graph(bench: &mut Bencher) {
+	pub fn read_network_graph(bench: &mut Criterion) {
 		let logger = crate::util::test_utils::TestLogger::new();
 		let mut d = crate::routing::router::bench_utils::get_route_file().unwrap();
 		let mut v = Vec::new();
 		d.read_to_end(&mut v).unwrap();
-		bench.iter(|| {
-			let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v), &logger).unwrap();
-		});
+		bench.bench_function("read_network_graph", |b| b.iter(||
+			NetworkGraph::read(&mut std::io::Cursor::new(black_box(&v)), &logger).unwrap()
+		));
 	}
 
-	#[bench]
-	fn write_network_graph(bench: &mut Bencher) {
+	pub fn write_network_graph(bench: &mut Criterion) {
 		let logger = crate::util::test_utils::TestLogger::new();
 		let mut d = crate::routing::router::bench_utils::get_route_file().unwrap();
 		let net_graph = NetworkGraph::read(&mut d, &logger).unwrap();
-		bench.iter(|| {
-			let _ = net_graph.encode();
-		});
+		bench.bench_function("write_network_graph", |b| b.iter(||
+			black_box(&net_graph).encode()
+		));
 	}
 }
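
Editor's note on the benchmark conversion above: the functions now take `&mut Criterion` instead of the old `test::Bencher`, so some binary built under `cfg(ldk_bench)` has to register them with a criterion harness. The following is a minimal sketch of such a harness, not part of this patch; the crate name `lightning`, the exact module path, and the group name are assumptions for illustration only.

// Hypothetical bench harness (sketch, not from the diff). Assumes a bench
// binary compiled with `--cfg ldk_bench` and a dependency on the `criterion`
// crate; the import path mirrors the `pub mod benches` added above.
use criterion::{criterion_group, criterion_main};

use lightning::routing::gossip::benches::{read_network_graph, write_network_graph};

// Each target already has the `fn(&mut Criterion)` signature that
// criterion_group! expects, so they can be registered directly.
criterion_group!(gossip_benches, read_network_graph, write_network_graph);
criterion_main!(gossip_benches);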
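The `test_node_info_keeps_compatibility` test added above documents that node addresses are now derived from the wrapped `announcement_message` rather than stored in a dedicated field, so a `NodeAnnouncementInfo` deserialized from old data without an announcement reports no addresses. A tiny illustrative helper, assuming it lives somewhere the crate-internal `NodeAnnouncementInfo` type is in scope (the helper name is hypothetical):

// Hypothetical helper (illustration only): with this change, addresses come
// from the embedded NodeAnnouncement, so an info object carrying
// `announcement_message: None` yields an empty address list.
fn announces_reachable_address(info: &NodeAnnouncementInfo) -> bool {
	// `addresses()` is the accessor exercised by the tests above; it exposes
	// whatever addresses the wrapped announcement advertises (possibly none).
	!info.addresses().is_empty()
}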