X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fgossip.rs;h=045772486ba7aa150636a1e7b5420a606e87ce64;hb=650caa099d4432205cee2e1f5d83d5a846148962;hp=11bf532e90fc40388f0debf30968fa5c6cb813bd;hpb=c1825672ed27839e95bb42bf8e75c9068da585d8;p=rust-lightning diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 11bf532e..04577248 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -9,22 +9,21 @@ //! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers +use bitcoin::blockdata::constants::ChainHash; + use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; -use bitcoin::secp256k1::PublicKey; +use bitcoin::secp256k1::{PublicKey, Verification}; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; -use bitcoin::hashes::hex::FromHex; -use bitcoin::hash_types::BlockHash; - use bitcoin::network::constants::Network; -use bitcoin::blockdata::constants::genesis_block; use crate::events::{MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::ChannelId; use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; -use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; +use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT}; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter}; use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use crate::ln::msgs; @@ -40,7 +39,7 @@ use crate::io_extras::{copy, sink}; use crate::prelude::*; use core::{cmp, fmt}; use core::convert::TryFrom; -use crate::sync::{RwLock, RwLockReadGuard}; +use crate::sync::{RwLock, RwLockReadGuard, LockTestExt}; #[cfg(feature = "std")] use core::sync::atomic::{AtomicUsize, Ordering}; use crate::sync::Mutex; @@ -75,11 +74,26 @@ impl NodeId { NodeId(pubkey.serialize()) } + /// Create a new NodeId from a slice of bytes + pub fn from_slice(bytes: &[u8]) -> Result { + if bytes.len() != PUBLIC_KEY_SIZE { + return Err(DecodeError::InvalidValue); + } + let mut data = [0; PUBLIC_KEY_SIZE]; + data.copy_from_slice(bytes); + Ok(NodeId(data)) + } + /// Get the public key slice from this NodeId pub fn as_slice(&self) -> &[u8] { &self.0 } + /// Get the public key as an array from this NodeId + pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] { + &self.0 + } + /// Get the public key from this NodeId pub fn as_pubkey(&self) -> Result { PublicKey::from_slice(&self.0) @@ -88,12 +102,12 @@ impl NodeId { impl fmt::Debug for NodeId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "NodeId({})", log_bytes!(self.0)) + write!(f, "NodeId({})", crate::util::logger::DebugBytes(&self.0)) } } impl fmt::Display for NodeId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", log_bytes!(self.0)) + crate::util::logger::DebugBytes(&self.0).fmt(f) } } @@ -153,10 +167,10 @@ impl TryFrom for PublicKey { } impl FromStr for NodeId { - type Err = bitcoin::hashes::hex::Error; + type Err = hex::parse::HexToArrayError; fn from_str(s: &str) -> Result { - let data: [u8; PUBLIC_KEY_SIZE] = FromHex::from_hex(s)?; + let data: [u8; PUBLIC_KEY_SIZE] = hex::FromHex::from_hex(s)?; Ok(NodeId(data)) } } @@ -165,7 +179,7 @@ impl FromStr for NodeId { pub struct NetworkGraph where 
L::Target: Logger { secp_ctx: Secp256k1, last_rapid_gossip_sync_timestamp: Mutex>, - genesis_hash: BlockHash, + chain_hash: ChainHash, logger: L, // Lock order: channels -> nodes channels: RwLock>, @@ -254,7 +268,7 @@ pub struct P2PGossipSync>, U: Deref, L: Deref> where U::Target: UtxoLookup, L::Target: Logger { network_graph: G, - utxo_lookup: Option, + utxo_lookup: RwLock>, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize, pending_events: Mutex>, @@ -273,7 +287,7 @@ where U::Target: UtxoLookup, L::Target: Logger network_graph, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize::new(0), - utxo_lookup, + utxo_lookup: RwLock::new(utxo_lookup), pending_events: Mutex::new(vec![]), logger, } @@ -282,8 +296,8 @@ where U::Target: UtxoLookup, L::Target: Logger /// Adds a provider used to check new announcements. Does not affect /// existing announcements unless they are updated. /// Add, update or remove the provider would replace the current one. - pub fn add_utxo_lookup(&mut self, utxo_lookup: Option) { - self.utxo_lookup = utxo_lookup; + pub fn add_utxo_lookup(&self, utxo_lookup: Option) { + *self.utxo_lookup.write().unwrap() = utxo_lookup; } /// Gets a reference to the underlying [`NetworkGraph`] which was provided in @@ -340,6 +354,9 @@ where U::Target: UtxoLookup, L::Target: Logger impl NetworkGraph where L::Target: Logger { /// Handles any network updates originating from [`Event`]s. + // + /// Note that this will skip applying any [`NetworkUpdate::ChannelUpdateMessage`] to avoid + /// leaking possibly identifying information of the sender to the public network. /// /// [`Event`]: crate::events::Event pub fn handle_network_update(&self, network_update: &NetworkUpdate) { @@ -348,8 +365,7 @@ impl NetworkGraph where L::Target: Logger { let short_channel_id = msg.contents.short_channel_id; let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1); let status = if is_enabled { "enabled" } else { "disabled" }; - log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}.", short_channel_id, status); - let _ = self.update_channel(msg); + log_debug!(self.logger, "Skipping application of a channel update from a payment failure. Channel {} is {}.", short_channel_id, status); }, NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { if is_permanent { @@ -366,6 +382,11 @@ impl NetworkGraph where L::Target: Logger { }, } } + + /// Gets the chain hash for this network graph. + pub fn get_chain_hash(&self) -> ChainHash { + self.chain_hash + } } macro_rules! secp_verify_sig { @@ -377,7 +398,7 @@ macro_rules! secp_verify_sig { err: format!("Invalid signature on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid signature on {} message", $msg_type), }, log_level: Level::Trace, @@ -395,7 +416,7 @@ macro_rules! get_pubkey_from_node_id { err: format!("Invalid public key on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid public key on {} message", $msg_type), }, log_level: Level::Trace @@ -404,6 +425,35 @@ macro_rules! 
get_pubkey_from_node_id { } } +fn message_sha256d_hash(msg: &M) -> Sha256dHash { + let mut engine = Sha256dHash::engine(); + msg.write(&mut engine).expect("In-memory structs should not fail to serialize"); + Sha256dHash::from_engine(engine) +} + +/// Verifies the signature of a [`NodeAnnouncement`]. +/// +/// Returns an error if it is invalid. +pub fn verify_node_announcement(msg: &NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); + + Ok(()) +} + +/// Verifies all signatures included in a [`ChannelAnnouncement`]. +/// +/// Returns an error if one of the signatures is invalid. +pub fn verify_channel_announcement(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement"); + + Ok(()) +} + impl>, U: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync where U::Target: UtxoLookup, L::Target: Logger { @@ -415,7 +465,7 @@ where U::Target: UtxoLookup, L::Target: Logger } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { - self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?; + self.network_graph.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } @@ -552,7 +602,7 @@ where U::Target: UtxoLookup, L::Target: Logger pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), msg: GossipTimestampFilter { - chain_hash: self.network_graph.genesis_hash, + chain_hash: self.network_graph.chain_hash, first_timestamp: gossip_start_time as u32, // 2106 issue! timestamp_range: u32::max_value(), }, @@ -591,7 +641,7 @@ where U::Target: UtxoLookup, L::Target: Logger let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0); // Per spec, we must reply to a query. Send an empty message when things are invalid. - if msg.chain_hash != self.network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { + if msg.chain_hash != self.network_graph.chain_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@ -658,7 +708,7 @@ where U::Target: UtxoLookup, L::Target: Logger // Prior replies should use the number of blocks that fit into the reply. Overflow // safe since first_blocknum is always <= last SCID's block. 
else { - (false, block_from_scid(batch.last().unwrap()) - first_blocknum) + (false, block_from_scid(*batch.last().unwrap()) - first_blocknum) }; prev_batch_endblock = first_blocknum + number_of_blocks; @@ -830,31 +880,31 @@ impl ChannelInfo { /// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a /// returned `source`, or `None` if `target` is not one of the channel's counterparties. pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, source) = { + let (direction, source, outbound) = { if target == &self.node_one { - (self.two_to_one.as_ref(), &self.node_two) + (self.two_to_one.as_ref(), &self.node_two, false) } else if target == &self.node_two { - (self.one_to_two.as_ref(), &self.node_one) + (self.one_to_two.as_ref(), &self.node_one, true) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), source)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), source)) } /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a /// returned `target`, or `None` if `source` is not one of the channel's counterparties. pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, target) = { + let (direction, target, outbound) = { if source == &self.node_one { - (self.one_to_two.as_ref(), &self.node_two) + (self.one_to_two.as_ref(), &self.node_two, true) } else if source == &self.node_two { - (self.two_to_one.as_ref(), &self.node_one) + (self.two_to_one.as_ref(), &self.node_one, false) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), target)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), target)) } /// Returns a [`ChannelUpdateInfo`] based on the direction implied by the channel_flag. @@ -871,7 +921,7 @@ impl ChannelInfo { impl fmt::Display for ChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}", - log_bytes!(self.features.encode()), log_bytes!(self.node_one.as_slice()), self.one_to_two, log_bytes!(self.node_two.as_slice()), self.two_to_one)?; + log_bytes!(self.features.encode()), &self.node_one, self.one_to_two, &self.node_two, self.two_to_one)?; Ok(()) } } @@ -950,51 +1000,55 @@ impl Readable for ChannelInfo { pub struct DirectedChannelInfo<'a> { channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, - htlc_maximum_msat: u64, - effective_capacity: EffectiveCapacity, + /// The direction this channel is in - if set, it indicates that we're traversing the channel + /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`]. 
+ from_node_one: bool, } impl<'a> DirectedChannelInfo<'a> { #[inline] - fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo) -> Self { - let mut htlc_maximum_msat = direction.htlc_maximum_msat; - let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); - - let effective_capacity = match capacity_msat { - Some(capacity_msat) => { - htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); - EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: htlc_maximum_msat } - }, - None => EffectiveCapacity::MaximumHTLC { amount_msat: htlc_maximum_msat }, - }; - - Self { - channel, direction, htlc_maximum_msat, effective_capacity - } + fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self { + Self { channel, direction, from_node_one } } /// Returns information for the channel. #[inline] pub fn channel(&self) -> &'a ChannelInfo { self.channel } - /// Returns the maximum HTLC amount allowed over the channel in the direction. - #[inline] - pub fn htlc_maximum_msat(&self) -> u64 { - self.htlc_maximum_msat - } - /// Returns the [`EffectiveCapacity`] of the channel in the direction. /// /// This is either the total capacity from the funding transaction, if known, or the /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known, /// otherwise. + #[inline] pub fn effective_capacity(&self) -> EffectiveCapacity { - self.effective_capacity + let mut htlc_maximum_msat = self.direction().htlc_maximum_msat; + let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); + + match capacity_msat { + Some(capacity_msat) => { + htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); + EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } + }, + None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat }, + } } /// Returns information for the direction. #[inline] pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.direction } + + /// Returns the `node_id` of the source hop. + /// + /// Refers to the `node_id` forwarding the payment to the next hop. + #[inline] + pub fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } } + + /// Returns the `node_id` of the target hop. + /// + /// Refers to the `node_id` receiving the payment from the previous hop. + #[inline] + pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } } } impl<'a> fmt::Debug for DirectedChannelInfo<'a> { @@ -1019,7 +1073,7 @@ pub enum EffectiveCapacity { liquidity_msat: u64, }, /// The maximum HTLC amount in one direction as advertised on the gossip network. - MaximumHTLC { + AdvertisedMaxHTLC { /// The maximum HTLC amount denominated in millisatoshi. amount_msat: u64, }, @@ -1033,6 +1087,11 @@ pub enum EffectiveCapacity { /// A capacity sufficient to route any payment, typically used for private channels provided by /// an invoice. Infinite, + /// The maximum HTLC amount as provided by an invoice route hint. + HintMaxHTLC { + /// The maximum HTLC amount denominated in millisatoshi. + amount_msat: u64, + }, /// A capacity that is unknown possibly because either the chain state is unavailable to know /// the total capacity or the `htlc_maximum_msat` was not advertised on the gossip network. 
Unknown, @@ -1047,8 +1106,9 @@ impl EffectiveCapacity { pub fn as_msat(&self) -> u64 { match self { EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat, - EffectiveCapacity::MaximumHTLC { amount_msat } => *amount_msat, + EffectiveCapacity::AdvertisedMaxHTLC { amount_msat } => *amount_msat, EffectiveCapacity::Total { capacity_msat, .. } => *capacity_msat, + EffectiveCapacity::HintMaxHTLC { amount_msat } => *amount_msat, EffectiveCapacity::Infinite => u64::max_value(), EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT, } @@ -1056,7 +1116,7 @@ impl EffectiveCapacity { } /// Fees for routing via a given channel or a node -#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] +#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash, Ord, PartialOrd)] pub struct RoutingFees { /// Flat routing fee in millisatoshis. pub base_msat: u32, @@ -1093,7 +1153,7 @@ pub struct NodeAnnouncementInfo { impl NodeAnnouncementInfo { /// Internet-level addresses via which one can connect to the node - pub fn addresses(&self) -> &[NetAddress] { + pub fn addresses(&self) -> &[SocketAddress] { self.announcement_message.as_ref() .map(|msg| msg.contents.addresses.as_slice()) .unwrap_or_default() @@ -1102,40 +1162,40 @@ impl NodeAnnouncementInfo { impl Writeable for NodeAnnouncementInfo { fn write(&self, writer: &mut W) -> Result<(), io::Error> { - let empty_addresses = Vec::::new(); + let empty_addresses = Vec::::new(); write_tlv_fields!(writer, { (0, self.features, required), (2, self.last_update, required), (4, self.rgb, required), (6, self.alias, required), (8, self.announcement_message, option), - (10, empty_addresses, vec_type), // Versions prior to 0.0.115 require this field + (10, empty_addresses, required_vec), // Versions prior to 0.0.115 require this field }); Ok(()) } } impl Readable for NodeAnnouncementInfo { - fn read(reader: &mut R) -> Result { - _init_and_read_tlv_fields!(reader, { + fn read(reader: &mut R) -> Result { + _init_and_read_len_prefixed_tlv_fields!(reader, { (0, features, required), (2, last_update, required), (4, rgb, required), (6, alias, required), (8, announcement_message, option), - (10, _addresses, vec_type), // deprecated, not used anymore + (10, _addresses, optional_vec), // deprecated, not used anymore }); - let _: Option> = _addresses; + let _: Option> = _addresses; Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(), alias: alias.0.unwrap(), announcement_message }) - } + } } /// A user-defined name for a node, which may be used when displaying the node in a graph. /// /// Since node aliases are provided by third parties, they are a potential avenue for injection /// attacks. Care must be taken when processing. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub struct NodeAlias(pub [u8; 32]); impl fmt::Display for NodeAlias { @@ -1181,6 +1241,18 @@ pub struct NodeInfo { pub announcement_info: Option } +impl NodeInfo { + /// Returns whether the node has only announced Tor addresses. 
+ pub fn is_tor_only(&self) -> bool { + self.announcement_info + .as_ref() + .map(|info| info.addresses()) + .and_then(|addresses| (!addresses.is_empty()).then(|| addresses)) + .map(|addresses| addresses.iter().all(|address| address.is_tor())) + .unwrap_or(false) + } +} + impl fmt::Display for NodeInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, " channels: {:?}, announcement_info: {:?}", @@ -1194,14 +1266,14 @@ impl Writeable for NodeInfo { write_tlv_fields!(writer, { // Note that older versions of LDK wrote the lowest inbound fees here at type 0 (2, self.announcement_info, option), - (4, self.channels, vec_type), + (4, self.channels, required_vec), }); Ok(()) } } // A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is -// necessary to maintain compatibility with previous serializations of `NetAddress` that have an +// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an // invalid hostname set. We ignore and eat all errors until we are either able to read a // `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end. struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo); @@ -1225,19 +1297,17 @@ impl Readable for NodeInfo { // with zero inbound fees, causing that heuristic to provide little gain. Worse, because it // requires additional complexity and lookups during routing, it ends up being a // performance loss. Thus, we simply ignore the old field here and no longer track it. - let mut _lowest_inbound_channel_fees: Option = None; - let mut announcement_info_wrap: Option = None; - _init_tlv_field_var!(channels, vec_type); - - read_tlv_fields!(reader, { + _init_and_read_len_prefixed_tlv_fields!(reader, { (0, _lowest_inbound_channel_fees, option), (2, announcement_info_wrap, upgradable_option), - (4, channels, vec_type), + (4, channels, required_vec), }); + let _: Option = _lowest_inbound_channel_fees; + let announcement_info_wrap: Option = announcement_info_wrap; Ok(NodeInfo { announcement_info: announcement_info_wrap.map(|w| w.0), - channels: _init_tlv_based_struct_field!(channels, vec_type), + channels, }) } } @@ -1249,7 +1319,7 @@ impl Writeable for NetworkGraph where L::Target: Logger { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); - self.genesis_hash.write(writer)?; + self.chain_hash.write(writer)?; let channels = self.channels.read().unwrap(); (channels.len() as u64).write(writer)?; for (ref chan_id, ref chan_info) in channels.unordered_iter() { @@ -1275,16 +1345,18 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - let genesis_hash: BlockHash = Readable::read(reader)?; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::new(); + // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. + let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; - let mut nodes = IndexedMap::new(); + // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. 
+ let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); for _ in 0..nodes_count { let node_id = Readable::read(reader)?; let node_info = Readable::read(reader)?; @@ -1298,13 +1370,13 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { Ok(NetworkGraph { secp_ctx: Secp256k1::verification_only(), - genesis_hash, + chain_hash, logger, channels: RwLock::new(channels), nodes: RwLock::new(nodes), last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp), - removed_nodes: Mutex::new(HashMap::new()), - removed_channels: Mutex::new(HashMap::new()), + removed_nodes: Mutex::new(new_hash_map()), + removed_channels: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), }) } @@ -1318,7 +1390,7 @@ impl fmt::Display for NetworkGraph where L::Target: Logger { } writeln!(f, "[Nodes]")?; for (&node_id, val) in self.nodes.read().unwrap().unordered_iter() { - writeln!(f, " {}: {}", log_bytes!(node_id.as_slice()), val)?; + writeln!(f, " {}: {}", &node_id, val)?; } Ok(()) } @@ -1327,9 +1399,14 @@ impl fmt::Display for NetworkGraph where L::Target: Logger { impl Eq for NetworkGraph where L::Target: Logger {} impl PartialEq for NetworkGraph where L::Target: Logger { fn eq(&self, other: &Self) -> bool { - self.genesis_hash == other.genesis_hash && - *self.channels.read().unwrap() == *other.channels.read().unwrap() && - *self.nodes.read().unwrap() == *other.nodes.read().unwrap() + // For a total lockorder, sort by position in memory and take the inner locks in that order. + // (Assumes that we can't move within memory while a lock is held). + let ord = ((self as *const _) as usize) < ((other as *const _) as usize); + let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) }; + let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) }; + let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self()); + let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self()); + self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b) } } @@ -1338,13 +1415,13 @@ impl NetworkGraph where L::Target: Logger { pub fn new(network: Network, logger: L) -> NetworkGraph { Self { secp_ctx: Secp256k1::verification_only(), - genesis_hash: genesis_block(network).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(network), logger, channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), - removed_channels: Mutex::new(HashMap::new()), - removed_nodes: Mutex::new(HashMap::new()), + removed_channels: Mutex::new(new_hash_map()), + removed_nodes: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), } } @@ -1387,8 +1464,7 @@ impl NetworkGraph where L::Target: Logger { /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. 
pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); + verify_node_announcement(msg, &self.secp_ctx)?; self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) } @@ -1451,11 +1527,7 @@ impl NetworkGraph where L::Target: Logger { where U::Target: UtxoLookup, { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement"); + verify_channel_announcement(msg, &self.secp_ctx)?; self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup) } @@ -1521,6 +1593,8 @@ impl NetworkGraph where L::Target: Logger { let node_id_a = channel_info.node_one.clone(); let node_id_b = channel_info.node_two.clone(); + log_gossip!(self.logger, "Adding channel {} between nodes {} and {}", short_channel_id, node_id_a, node_id_b); + match channels.entry(short_channel_id) { IndexedMapEntry::Occupied(mut entry) => { //TODO: because asking the blockchain if short_channel_id is valid is only optional @@ -1573,6 +1647,13 @@ impl NetworkGraph where L::Target: Logger { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } + if msg.chain_hash != self.chain_hash { + return Err(LightningError { + err: "Channel announcement chain hash does not match genesis hash".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } + { let channels = self.channels.read().unwrap(); @@ -1749,16 +1830,23 @@ impl NetworkGraph where L::Target: Logger { let mut scids_to_remove = Vec::new(); for (scid, info) in channels.unordered_iter_mut() { if info.one_to_two.is_some() && info.one_to_two.as_ref().unwrap().last_update < min_time_unix { + log_gossip!(self.logger, "Removing directional update one_to_two (0) for channel {} due to its timestamp {} being below {}", + scid, info.one_to_two.as_ref().unwrap().last_update, min_time_unix); info.one_to_two = None; } if info.two_to_one.is_some() && info.two_to_one.as_ref().unwrap().last_update < min_time_unix { + log_gossip!(self.logger, "Removing directional update two_to_one (1) for channel {} due to its timestamp {} being below {}", + scid, info.two_to_one.as_ref().unwrap().last_update, min_time_unix); info.two_to_one = None; } if info.one_to_two.is_none() || info.two_to_one.is_none() { // We check the announcement_received_time here to ensure we don't drop // announcements that we just received and are just waiting for our peer to send a // channel_update for. 
- if info.announcement_received_time < min_time_unix as u64 { + let announcement_received_timestamp = info.announcement_received_time; + if announcement_received_timestamp < min_time_unix as u64 { + log_gossip!(self.logger, "Removing channel {} because both directional updates are missing and its announcement timestamp {} being below {}", + scid, announcement_received_timestamp, min_time_unix); scids_to_remove.push(*scid); } } @@ -1779,7 +1867,7 @@ impl NetworkGraph where L::Target: Logger { // NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal, // so we'll just set the removal time here to the current UNIX time on the very next invocation // of this function. - #[cfg(feature = "no-std")] + #[cfg(not(feature = "std"))] { let mut tracked_time = Some(current_time_unix); core::mem::swap(time, &mut tracked_time); @@ -1796,14 +1884,14 @@ impl NetworkGraph where L::Target: Logger { /// For an already known (from announcement) channel, update info about one of the directions /// of the channel. /// - /// You probably don't want to call this directly, instead relying on a P2PGossipSync's - /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. /// /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(&msg.contents, Some(&msg), Some(&msg.signature)) + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false) } /// For an already known (from announcement) channel, update info about one of the directions @@ -1813,12 +1901,32 @@ impl NetworkGraph where L::Target: Logger { /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(msg, None, None) + self.update_channel_internal(msg, None, None, false) } - fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>) -> Result<(), LightningError> { + /// For an already known (from announcement) channel, verify the given [`ChannelUpdate`]. + /// + /// This checks whether the update currently is applicable by [`Self::update_channel`]. + /// + /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or + /// materially in the future will be rejected. 
+ pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true) + } + + fn update_channel_internal(&self, msg: &msgs::UnsignedChannelUpdate, + full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>, + only_verify: bool) -> Result<(), LightningError> + { let chan_enabled = msg.flags & (1 << 1) != (1 << 1); + if msg.chain_hash != self.chain_hash { + return Err(LightningError { + err: "Channel update chain hash does not match genesis hash".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } + #[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))] { // Note that many tests rely on being able to set arbitrarily old timestamps, thus we @@ -1832,12 +1940,17 @@ impl NetworkGraph where L::Target: Logger { } } + log_gossip!(self.logger, "Updating channel {} in direction {} with timestamp {}", msg.short_channel_id, msg.flags & 1, msg.timestamp); + let mut channels = self.channels.write().unwrap(); match channels.get_mut(&msg.short_channel_id) { None => { core::mem::drop(channels); self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?; - return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}); + return Err(LightningError { + err: "Couldn't find channel for update".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Gossip), + }); }, Some(channel) => { if msg.htlc_maximum_msat > MAX_VALUE_MSAT { @@ -1894,7 +2007,7 @@ impl NetworkGraph where L::Target: Logger { } } } - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if msg.flags & 1 == 1 { check_update_latest!(channel.two_to_one); if let Some(sig) = sig { @@ -1903,7 +2016,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.two_to_one = get_new_channel_info!(); + if !only_verify { + channel.two_to_one = get_new_channel_info!(); + } } else { check_update_latest!(channel.one_to_two); if let Some(sig) = sig { @@ -1912,7 +2027,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.one_to_two = get_new_channel_info!(); + if !only_verify { + channel.one_to_two = get_new_channel_info!(); + } } } } @@ -1981,7 +2098,7 @@ impl ReadOnlyNetworkGraph<'_> { /// Get network addresses by node id. /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. 
- pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { + pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { self.nodes.get(&NodeId::from_pubkey(&pubkey)) .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec())) } @@ -1994,6 +2111,7 @@ pub(crate) mod tests { use crate::ln::chan_utils::make_funding_redeemscript; #[cfg(feature = "std")] use crate::ln::features::InitFeatures; + use crate::ln::msgs::SocketAddress; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo}; use crate::routing::utxo::{UtxoLookupError, UtxoResult}; use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, @@ -2001,7 +2119,7 @@ pub(crate) mod tests { ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use crate::util::config::UserConfig; use crate::util::test_utils; - use crate::util::ser::{ReadableArgs, Readable, Writeable}; + use crate::util::ser::{Hostname, ReadableArgs, Readable, Writeable}; use crate::util::scid_utils::scid_from_parts; use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS; @@ -2009,13 +2127,11 @@ pub(crate) mod tests { use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; + use bitcoin::hashes::hex::FromHex; use bitcoin::network::constants::Network; - use bitcoin::blockdata::constants::genesis_block; - use bitcoin::blockdata::script::Script; + use bitcoin::blockdata::constants::ChainHash; + use bitcoin::blockdata::script::ScriptBuf; use bitcoin::blockdata::transaction::TxOut; - - use hex; - use bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; @@ -2044,7 +2160,7 @@ pub(crate) mod tests { fn request_full_sync_finite_times() { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); + let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(gossip_sync.should_request_full_sync(&node_id)); assert!(gossip_sync.should_request_full_sync(&node_id)); @@ -2082,7 +2198,7 @@ pub(crate) mod tests { let mut unsigned_announcement = UnsignedChannelAnnouncement { features: channelmanager::provided_channel_features(&UserConfig::default()), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, node_id_1: NodeId::from_pubkey(&node_id_1), node_id_2: NodeId::from_pubkey(&node_id_2), @@ -2101,7 +2217,7 @@ pub(crate) mod tests { } } - pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> Script { + pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> ScriptBuf { let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), @@ -2110,7 +2226,7 @@ pub(crate) mod tests { pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { let mut unsigned_channel_update = UnsignedChannelUpdate { - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + 
chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, timestamp: 100, flags: 0, @@ -2158,7 +2274,7 @@ pub(crate) mod tests { Err(_) => panic!() }; - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); match gossip_sync.handle_node_announcement( &NodeAnnouncement { signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey), @@ -2311,6 +2427,16 @@ pub(crate) mod tests { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself") }; + + // Test that channel announcements with the wrong chain hash are ignored (network graph is testnet, + // announcement is mainnet). + let incorrect_chain_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); + }, node_1_privkey, node_2_privkey, &secp_ctx); + match gossip_sync.handle_channel_announcement(&incorrect_chain_announcement) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash") + }; } #[test] @@ -2343,6 +2469,7 @@ pub(crate) mod tests { } let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); + network_graph.verify_channel_update(&valid_channel_update).unwrap(); match gossip_sync.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(res), _ => panic!(), @@ -2409,12 +2536,23 @@ pub(crate) mod tests { unsigned_channel_update.timestamp += 500; }, node_1_privkey, &secp_ctx); let zero_hash = Sha256dHash::hash(&[0; 32]); - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey); match gossip_sync.handle_channel_update(&invalid_sig_channel_update) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message") }; + + // Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel + // update is mainet). + let incorrect_chain_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); + }, node_1_privkey, &secp_ctx); + + match gossip_sync.handle_channel_update(&incorrect_chain_update) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash") + }; } #[test] @@ -2434,7 +2572,8 @@ pub(crate) mod tests { let short_channel_id; { - // Announce a channel we will update + // Check we won't apply an update via `handle_network_update` for privacy reasons, but + // can continue fine if we manually apply it. 
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; @@ -2445,10 +2584,11 @@ pub(crate) mod tests { assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage { - msg: valid_channel_update, + msg: valid_channel_update.clone(), }); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + network_graph.update_channel(&valid_channel_update).unwrap(); } // Non-permanent failure doesn't touch the channel at all @@ -2846,11 +2986,11 @@ pub(crate) mod tests { let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); // It should ignore if gossip_queries feature is not enabled { - let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None }; + let init_msg = Init { features: InitFeatures::empty(), networks: None, remote_network_address: None }; gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); @@ -2860,7 +3000,7 @@ pub(crate) mod tests { { let mut features = InitFeatures::empty(); features.set_gossip_queries_optional(); - let init_msg = Init { features, remote_network_address: None }; + let init_msg = Init { features, networks: None, remote_network_address: None }; gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2883,7 +3023,7 @@ pub(crate) mod tests { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); @@ -2935,13 +3075,13 @@ pub(crate) mod tests { &gossip_sync, &node_id_2, QueryChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, }, false, vec![ReplyChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, sync_complete: true, @@ -3176,7 +3316,7 @@ pub(crate) mod tests { let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let result = gossip_sync.handle_query_short_channel_ids(&node_id, QueryShortChannelIds { chain_hash, @@ -3235,16 +3375,16 @@ pub(crate) mod tests { assert_eq!(chan_update_info, 
read_chan_update_info); // Check the serialization hasn't changed. - let legacy_chan_update_info_with_some: Vec = hex::decode("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_some: Vec = >::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some); // Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself // or the ChannelUpdate enclosed with `last_update_message`. - let legacy_chan_update_info_with_some_and_fail_update: Vec = hex::decode("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); + let legacy_chan_update_info_with_some_and_fail_update: Vec = >::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_some_and_fail_update.as_slice()); assert!(read_chan_update_info_res.is_err()); - let legacy_chan_update_info_with_none: Vec = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_none: Vec = >::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice()); assert!(read_chan_update_info_res.is_err()); @@ -3286,18 +3426,18 @@ pub(crate) mod tests { assert_eq!(chan_info_some_updates, read_chan_info); // Check the serialization hasn't changed. - let legacy_chan_info_with_some: Vec = hex::decode("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_some: Vec = >::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); assert_eq!(encoded_chan_info, legacy_chan_info_with_some); // Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` / // `last_update_message` fields fail to decode due to missing htlc_maximum_msat. 
- let legacy_chan_info_with_some_and_fail_update = hex::decode("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap(); + let legacy_chan_info_with_some_and_fail_update = >::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap(); let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_some_and_fail_update.as_slice()).unwrap(); assert_eq!(read_chan_info.announcement_received_time, 87654); assert_eq!(read_chan_info.one_to_two, None); assert_eq!(read_chan_info.two_to_one, None); - let legacy_chan_info_with_none: Vec = hex::decode("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_none: Vec = >::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap(); assert_eq!(read_chan_info.announcement_received_time, 87654); assert_eq!(read_chan_info.one_to_two, None); @@ -3307,7 +3447,7 @@ pub(crate) mod tests { #[test] fn node_info_is_readable() { // 1. 
Check we can read a valid NodeAnnouncementInfo and fail on an invalid one - let announcement_message = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); + let announcement_message = >::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap(); let valid_node_ann_info = NodeAnnouncementInfo { features: channelmanager::provided_node_features(&UserConfig::default()), @@ -3323,7 +3463,7 @@ pub(crate) mod tests { assert_eq!(read_valid_node_ann_info, valid_node_ann_info); assert_eq!(read_valid_node_ann_info.addresses().len(), 1); - let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); + let encoded_invalid_node_ann_info = >::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice()); assert!(read_invalid_node_ann_info_res.is_err()); @@ -3338,46 +3478,155 @@ pub(crate) mod tests { let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap(); assert_eq!(read_valid_node_info, valid_node_info); - let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); + let encoded_invalid_node_info_hex = >::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap(); assert_eq!(read_invalid_node_info.announcement_info, None); } #[test] fn test_node_info_keeps_compatibility() { - let old_ann_info_with_addresses = hex::decode("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); + let old_ann_info_with_addresses = >::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice()) .expect("to be able to read an old NodeAnnouncementInfo with addresses"); // This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty assert!(ann_info_with_addresses.addresses().is_empty()); } + + #[test] + fn test_node_id_display() { + let node_id = NodeId([42; 33]); + assert_eq!(format!("{}", &node_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"); + } + + #[test] + fn is_tor_only_node() { + let network_graph = create_network_graph(); + let 
(secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); + + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); + let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); + let node_1_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey)); + + let announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + gossip_sync.handle_channel_announcement(&announcement).unwrap(); + + let tcp_ip_v4 = SocketAddress::TcpIpV4 { + addr: [255, 254, 253, 252], + port: 9735 + }; + let tcp_ip_v6 = SocketAddress::TcpIpV6 { + addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240], + port: 9735 + }; + let onion_v2 = SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]); + let onion_v3 = SocketAddress::OnionV3 { + ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224], + checksum: 32, + version: 16, + port: 9735 + }; + let hostname = SocketAddress::Hostname { + hostname: Hostname::try_from(String::from("host")).unwrap(), + port: 9735, + }; + + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![ + tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone(), + hostname.clone() + ]; + announcement.timestamp += 1000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![ + tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone() + ]; + announcement.timestamp += 2000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![ + tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone() + ]; + announcement.timestamp += 3000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()]; + announcement.timestamp += 4000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![onion_v2.clone()]; + announcement.timestamp += 5000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![tcp_ip_v4.clone()]; + 
announcement.timestamp += 6000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + } } -#[cfg(all(test, feature = "_bench_unstable"))] -mod benches { +#[cfg(ldk_bench)] +pub mod benches { use super::*; - - use test::Bencher; use std::io::Read; + use criterion::{black_box, Criterion}; - #[bench] - fn read_network_graph(bench: &mut Bencher) { + pub fn read_network_graph(bench: &mut Criterion) { let logger = crate::util::test_utils::TestLogger::new(); let mut d = crate::routing::router::bench_utils::get_route_file().unwrap(); let mut v = Vec::new(); d.read_to_end(&mut v).unwrap(); - bench.iter(|| { - let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v), &logger).unwrap(); - }); + bench.bench_function("read_network_graph", |b| b.iter(|| + NetworkGraph::read(&mut std::io::Cursor::new(black_box(&v)), &logger).unwrap() + )); } - #[bench] - fn write_network_graph(bench: &mut Bencher) { + pub fn write_network_graph(bench: &mut Criterion) { let logger = crate::util::test_utils::TestLogger::new(); let mut d = crate::routing::router::bench_utils::get_route_file().unwrap(); let net_graph = NetworkGraph::read(&mut d, &logger).unwrap(); - bench.iter(|| { - let _ = net_graph.encode(); - }); + bench.bench_function("write_network_graph", |b| b.iter(|| + black_box(&net_graph).encode() + )); } }
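
The `add_utxo_lookup` change above moves the UTXO source behind `RwLock<Option<U>>` so it can be installed or swapped through `&self`, i.e. after the `P2PGossipSync` has already been shared. Below is a minimal sketch of that interior-mutability pattern using plain `std` types; the `UtxoLookup` trait, `Esplora` type and `GossipSync` struct are illustrative stand-ins, not LDK's actual API.

use std::sync::{Arc, RwLock};

trait UtxoLookup: Send + Sync {
    fn name(&self) -> &'static str;
}

struct Esplora;
impl UtxoLookup for Esplora {
    fn name(&self) -> &'static str { "esplora" }
}

struct GossipSync {
    utxo_lookup: RwLock<Option<Arc<dyn UtxoLookup>>>,
}

impl GossipSync {
    // `&self` is enough: the RwLock provides the mutability, so callers holding
    // only an Arc<GossipSync> can still change the lookup at runtime.
    fn add_utxo_lookup(&self, utxo_lookup: Option<Arc<dyn UtxoLookup>>) {
        *self.utxo_lookup.write().unwrap() = utxo_lookup;
    }
}

fn main() {
    let sync = Arc::new(GossipSync { utxo_lookup: RwLock::new(None) });
    sync.add_utxo_lookup(Some(Arc::new(Esplora)));
    assert_eq!(sync.utxo_lookup.read().unwrap().as_ref().unwrap().name(), "esplora");
}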
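
The new `message_sha256d_hash` helper streams a message's serialization directly into a double-SHA256 engine rather than hashing an intermediate `Vec` as the removed `Sha256dHash::hash(&msg.contents.encode()[..])` calls did, and `verify_node_announcement` / `verify_channel_announcement` check the gossip signatures against that digest. A rough sketch of the hashing step, assuming the `bitcoin` crate's `hashes` module (which this file already imports) and taking an already-serialized byte slice instead of a `Writeable` message:

use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::{Hash, HashEngine};

// Gossip signatures commit to the double-SHA256 of the unsigned message body.
fn sha256d_of(serialized_contents: &[u8]) -> Sha256dHash {
    let mut engine = Sha256dHash::engine();
    engine.input(serialized_contents);
    Sha256dHash::from_engine(engine)
}

fn main() {
    let digest = sha256d_of(b"example unsigned gossip payload");
    println!("sha256d: {}", digest);
}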
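
The `reply_channel_range` batching above derives each reply's `number_of_blocks` from `block_from_scid(*batch.last().unwrap())`. A short channel id packs the funding block height, transaction index and output index into 24 + 24 + 16 bits; the sketch below shows that layout with simplified helpers (LDK's real `scid_from_parts` also range-checks its inputs and returns a `Result`, which is why the handler tests `inclusive_start_scid.is_err()`).

fn scid_from_parts(block: u64, tx_index: u64, vout_index: u64) -> u64 {
    // 24 bits of block height, 24 bits of transaction index, 16 bits of output index.
    (block << 40) | (tx_index << 16) | vout_index
}

fn block_from_scid(scid: u64) -> u32 {
    (scid >> 40) as u32
}

fn main() {
    let scid = scid_from_parts(500_000, 1_024, 1);
    assert_eq!(block_from_scid(scid), 500_000);
    assert_eq!((scid >> 16) & 0xff_ffff, 1_024);
    assert_eq!(scid & 0xffff, 1);
}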
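
`DirectedChannelInfo::effective_capacity` is now computed on the fly from the stored direction instead of being cached in the struct: when the funding capacity is known, the advertised `htlc_maximum_msat` is clamped to it and reported as `Total`; otherwise the renamed `AdvertisedMaxHTLC` variant carries the advertised limit. A standalone sketch of that decision with simplified types (the enum and field names follow the patch, the free function is illustrative):

#[derive(Debug, PartialEq)]
enum EffectiveCapacity {
    Total { capacity_msat: u64, htlc_maximum_msat: u64 },
    AdvertisedMaxHTLC { amount_msat: u64 },
}

fn effective_capacity(htlc_maximum_msat: u64, capacity_sats: Option<u64>) -> EffectiveCapacity {
    match capacity_sats.map(|sats| sats * 1000) {
        Some(capacity_msat) => EffectiveCapacity::Total {
            capacity_msat,
            // A channel_update may advertise more than the funding output holds; clamp it.
            htlc_maximum_msat: core::cmp::min(htlc_maximum_msat, capacity_msat),
        },
        None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
    }
}

fn main() {
    assert_eq!(
        effective_capacity(2_000_000, Some(1_000)),
        EffectiveCapacity::Total { capacity_msat: 1_000_000, htlc_maximum_msat: 1_000_000 }
    );
    assert_eq!(
        effective_capacity(2_000_000, None),
        EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 2_000_000 }
    );
}

With the cached fields gone, `DirectedChannelInfo` carries only the channel and direction references plus the new `from_node_one` flag, which also backs the `source()` and `target()` accessors added above.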
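
`NodeInfo::is_tor_only` treats a node as Tor-only exactly when it has announced a non-empty address list whose entries are all onion addresses, which is what the `is_tor_only_node` test above exercises by cycling through mixed, onion-only and empty address vectors. A compact sketch of the predicate over a toy address enum (the real code works on `SocketAddress::is_tor` and the optional `NodeAnnouncementInfo`):

enum Addr {
    TcpIpV4,
    OnionV2,
    OnionV3,
}

impl Addr {
    fn is_tor(&self) -> bool {
        matches!(self, Addr::OnionV2 | Addr::OnionV3)
    }
}

fn is_tor_only(addresses: &[Addr]) -> bool {
    // An empty list means "nothing announced", which must not count as Tor-only.
    !addresses.is_empty() && addresses.iter().all(|a| a.is_tor())
}

fn main() {
    assert!(!is_tor_only(&[]));
    assert!(!is_tor_only(&[Addr::TcpIpV4, Addr::OnionV3]));
    assert!(is_tor_only(&[Addr::OnionV2, Addr::OnionV3]));
}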
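
The rewritten `PartialEq for NetworkGraph` compares the inner channel and node maps while holding all four locks, ordering acquisition by the memory addresses of `self` and `other` so that concurrent `a == b` and `b == a` comparisons agree on a single lock order. A self-contained sketch of the address-ordering idea with plain `std::sync::RwLock` (the `Graph` type and `eq_locked` name are illustrative; LDK additionally routes the locking through its `LockTestExt::unsafe_well_ordered_double_lock_self` helper):

use std::sync::RwLock;

struct Graph {
    channels: RwLock<Vec<u64>>,
    nodes: RwLock<Vec<u64>>,
}

impl Graph {
    fn eq_locked(&self, other: &Graph) -> bool {
        // Establish a total order on the two structs by address and always lock the
        // "lower" one first; equality itself does not depend on which side is first.
        let self_first = (self as *const Graph as usize) < (other as *const Graph as usize);
        let (first, second) = if self_first { (self, other) } else { (other, self) };
        let first_channels = first.channels.read().unwrap();
        let second_channels = second.channels.read().unwrap();
        let first_nodes = first.nodes.read().unwrap();
        let second_nodes = second.nodes.read().unwrap();
        *first_channels == *second_channels && *first_nodes == *second_nodes
    }
}

fn main() {
    let a = Graph { channels: RwLock::new(vec![1]), nodes: RwLock::new(vec![2]) };
    let b = Graph { channels: RwLock::new(vec![1]), nodes: RwLock::new(vec![2]) };
    assert!(a.eq_locked(&b));
    assert!(b.eq_locked(&a));
}

As the comment in the patch notes, this relies on neither graph moving in memory while the locks are held, which holds here because both values stay borrowed for the duration of the call.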
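
`remove_stale_channels_and_tracking` now logs each removal, but the pruning rule itself is unchanged: a directional update whose `last_update` is older than the two-week limit is dropped, and a channel missing one or both directions is removed entirely once its announcement is also older than that limit, so freshly announced channels still waiting for their first `channel_update` survive. A condensed sketch of the rule with simplified types (field names follow the patch; the two-week value reflects the limit described in the doc comments above, and `prune` is an illustrative helper, not LDK's API):

const STALE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14; // two weeks

struct ChannelInfo {
    one_to_two: Option<u32>, // last_update timestamps for directions 0 and 1
    two_to_one: Option<u32>,
    announcement_received_time: u64,
}

/// Drops stale directional updates in place and returns true if the whole
/// channel should be removed.
fn prune(info: &mut ChannelInfo, current_time_unix: u64) -> bool {
    let min_time_unix = (current_time_unix - STALE_LIMIT_SECS) as u32;
    if info.one_to_two.map_or(false, |t| t < min_time_unix) {
        info.one_to_two = None;
    }
    if info.two_to_one.map_or(false, |t| t < min_time_unix) {
        info.two_to_one = None;
    }
    // Keep channels that were announced recently and are still waiting for updates.
    (info.one_to_two.is_none() || info.two_to_one.is_none())
        && info.announcement_received_time < min_time_unix as u64
}

fn main() {
    let now = 1_700_000_000u64;
    let mut fresh = ChannelInfo { one_to_two: None, two_to_one: None, announcement_received_time: now };
    assert!(!prune(&mut fresh, now));

    let mut stale = ChannelInfo { one_to_two: Some(1_000), two_to_one: None, announcement_received_time: 1_000 };
    assert!(prune(&mut stale, now));
    assert!(stale.one_to_two.is_none());
}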