X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fgossip.rs;h=42bf20a78a51684056f896b50fc510d18b279d0f;hb=ae0d825d89ca0ac2489737d1b413e778650b093c;hp=ef776a44dc11c484526f7f5baeb64db06498710b;hpb=3141630f78a0f1ec7ef374ede95a21e4e64213f5;p=rust-lightning diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index ef776a44..42bf20a7 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -9,6 +9,8 @@ //! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers +use bitcoin::blockdata::constants::ChainHash; + use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; use bitcoin::secp256k1::{PublicKey, Verification}; use bitcoin::secp256k1::Secp256k1; @@ -16,11 +18,7 @@ use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; -use bitcoin::hashes::hex::FromHex; -use bitcoin::hash_types::BlockHash; - use bitcoin::network::constants::Network; -use bitcoin::blockdata::constants::genesis_block; use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::ChannelId; @@ -40,7 +38,6 @@ use crate::io; use crate::io_extras::{copy, sink}; use crate::prelude::*; use core::{cmp, fmt}; -use core::convert::TryFrom; use crate::sync::{RwLock, RwLockReadGuard, LockTestExt}; #[cfg(feature = "std")] use core::sync::atomic::{AtomicUsize, Ordering}; @@ -76,11 +73,26 @@ impl NodeId { NodeId(pubkey.serialize()) } + /// Create a new NodeId from a slice of bytes + pub fn from_slice(bytes: &[u8]) -> Result { + if bytes.len() != PUBLIC_KEY_SIZE { + return Err(DecodeError::InvalidValue); + } + let mut data = [0; PUBLIC_KEY_SIZE]; + data.copy_from_slice(bytes); + Ok(NodeId(data)) + } + /// Get the public key slice from this NodeId pub fn as_slice(&self) -> &[u8] { &self.0 } + /// Get the public key as an array from this NodeId + pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] { + &self.0 + } + /// Get the public key from this NodeId pub fn as_pubkey(&self) -> Result { PublicKey::from_slice(&self.0) @@ -154,10 +166,10 @@ impl TryFrom for PublicKey { } impl FromStr for NodeId { - type Err = bitcoin::hashes::hex::Error; + type Err = hex::parse::HexToArrayError; fn from_str(s: &str) -> Result { - let data: [u8; PUBLIC_KEY_SIZE] = FromHex::from_hex(s)?; + let data: [u8; PUBLIC_KEY_SIZE] = hex::FromHex::from_hex(s)?; Ok(NodeId(data)) } } @@ -166,7 +178,7 @@ impl FromStr for NodeId { pub struct NetworkGraph where L::Target: Logger { secp_ctx: Secp256k1, last_rapid_gossip_sync_timestamp: Mutex>, - genesis_hash: BlockHash, + chain_hash: ChainHash, logger: L, // Lock order: channels -> nodes channels: RwLock>, @@ -341,6 +353,9 @@ where U::Target: UtxoLookup, L::Target: Logger impl NetworkGraph where L::Target: Logger { /// Handles any network updates originating from [`Event`]s. + // + /// Note that this will skip applying any [`NetworkUpdate::ChannelUpdateMessage`] to avoid + /// leaking possibly identifying information of the sender to the public network. /// /// [`Event`]: crate::events::Event pub fn handle_network_update(&self, network_update: &NetworkUpdate) { @@ -349,8 +364,7 @@ impl NetworkGraph where L::Target: Logger { let short_channel_id = msg.contents.short_channel_id; let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1); let status = if is_enabled { "enabled" } else { "disabled" }; - log_debug!(self.logger, "Updating channel with channel_update from a payment failure. 
Channel {} is {}.", short_channel_id, status); - let _ = self.update_channel(msg); + log_debug!(self.logger, "Skipping application of a channel update from a payment failure. Channel {} is {}.", short_channel_id, status); }, NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { if is_permanent { @@ -368,9 +382,9 @@ impl NetworkGraph where L::Target: Logger { } } - /// Gets the genesis hash for this network graph. - pub fn get_genesis_hash(&self) -> BlockHash { - self.genesis_hash + /// Gets the chain hash for this network graph. + pub fn get_chain_hash(&self) -> ChainHash { + self.chain_hash } } @@ -410,11 +424,17 @@ macro_rules! get_pubkey_from_node_id { } } +fn message_sha256d_hash(msg: &M) -> Sha256dHash { + let mut engine = Sha256dHash::engine(); + msg.write(&mut engine).expect("In-memory structs should not fail to serialize"); + Sha256dHash::from_engine(engine) +} + /// Verifies the signature of a [`NodeAnnouncement`]. /// /// Returns an error if it is invalid. pub fn verify_node_announcement(msg: &NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); Ok(()) @@ -424,7 +444,7 @@ pub fn verify_node_announcement(msg: &NodeAnnouncement, secp_ct /// /// Returns an error if one of the signatures is invalid. pub fn verify_channel_announcement(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); @@ -581,7 +601,7 @@ where U::Target: UtxoLookup, L::Target: Logger pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), msg: GossipTimestampFilter { - chain_hash: self.network_graph.genesis_hash, + chain_hash: self.network_graph.chain_hash, first_timestamp: gossip_start_time as u32, // 2106 issue! timestamp_range: u32::max_value(), }, @@ -620,7 +640,7 @@ where U::Target: UtxoLookup, L::Target: Logger let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0); // Per spec, we must reply to a query. Send an empty message when things are invalid. 
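The new `message_sha256d_hash` helper introduced above streams a gossip message's wire encoding straight into the double-SHA256 engine, rather than first collecting it into a `Vec` via `encode()`. A minimal sketch of the same pattern from outside the crate (the helper name `gossip_msg_hash` is hypothetical; assumes a `std` build so the hash engine satisfies the blanket `Writer` impl):

```rust
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
use lightning::util::ser::Writeable;

// Hash any serializable gossip message the way the verify_* functions above do:
// serialize directly into the hash engine, avoiding an intermediate allocation.
fn gossip_msg_hash<M: Writeable>(msg: &M) -> Sha256dHash {
	let mut engine = Sha256dHash::engine();
	msg.write(&mut engine).expect("in-memory writes should not fail");
	Sha256dHash::from_engine(engine)
}
```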
- if msg.chain_hash != self.network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { + if msg.chain_hash != self.network_graph.chain_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@ -687,7 +707,7 @@ where U::Target: UtxoLookup, L::Target: Logger // Prior replies should use the number of blocks that fit into the reply. Overflow // safe since first_blocknum is always <= last SCID's block. else { - (false, block_from_scid(batch.last().unwrap()) - first_blocknum) + (false, block_from_scid(*batch.last().unwrap()) - first_blocknum) }; prev_batch_endblock = first_blocknum + number_of_blocks; @@ -859,31 +879,31 @@ impl ChannelInfo { /// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a /// returned `source`, or `None` if `target` is not one of the channel's counterparties. pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, source) = { + let (direction, source, outbound) = { if target == &self.node_one { - (self.two_to_one.as_ref(), &self.node_two) + (self.two_to_one.as_ref(), &self.node_two, false) } else if target == &self.node_two { - (self.one_to_two.as_ref(), &self.node_one) + (self.one_to_two.as_ref(), &self.node_one, true) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), source)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), source)) } /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a /// returned `target`, or `None` if `source` is not one of the channel's counterparties. pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, target) = { + let (direction, target, outbound) = { if source == &self.node_one { - (self.one_to_two.as_ref(), &self.node_two) + (self.one_to_two.as_ref(), &self.node_two, true) } else if source == &self.node_two { - (self.two_to_one.as_ref(), &self.node_one) + (self.two_to_one.as_ref(), &self.node_one, false) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), target)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), target)) } /// Returns a [`ChannelUpdateInfo`] based on the direction implied by the channel_flag. @@ -979,51 +999,55 @@ impl Readable for ChannelInfo { pub struct DirectedChannelInfo<'a> { channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, - htlc_maximum_msat: u64, - effective_capacity: EffectiveCapacity, + /// The direction this channel is in - if set, it indicates that we're traversing the channel + /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`]. 
+ from_node_one: bool, } impl<'a> DirectedChannelInfo<'a> { #[inline] - fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo) -> Self { - let mut htlc_maximum_msat = direction.htlc_maximum_msat; - let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); - - let effective_capacity = match capacity_msat { - Some(capacity_msat) => { - htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); - EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: htlc_maximum_msat } - }, - None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat }, - }; - - Self { - channel, direction, htlc_maximum_msat, effective_capacity - } + fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self { + Self { channel, direction, from_node_one } } /// Returns information for the channel. #[inline] pub fn channel(&self) -> &'a ChannelInfo { self.channel } - /// Returns the maximum HTLC amount allowed over the channel in the direction. - #[inline] - pub fn htlc_maximum_msat(&self) -> u64 { - self.htlc_maximum_msat - } - /// Returns the [`EffectiveCapacity`] of the channel in the direction. /// /// This is either the total capacity from the funding transaction, if known, or the /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known, /// otherwise. + #[inline] pub fn effective_capacity(&self) -> EffectiveCapacity { - self.effective_capacity + let mut htlc_maximum_msat = self.direction().htlc_maximum_msat; + let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); + + match capacity_msat { + Some(capacity_msat) => { + htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); + EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } + }, + None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat }, + } } /// Returns information for the direction. #[inline] pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.direction } + + /// Returns the `node_id` of the source hop. + /// + /// Refers to the `node_id` forwarding the payment to the next hop. + #[inline] + pub fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } } + + /// Returns the `node_id` of the target hop. + /// + /// Refers to the `node_id` receiving the payment from the previous hop. + #[inline] + pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } } } impl<'a> fmt::Debug for DirectedChannelInfo<'a> { @@ -1170,7 +1194,7 @@ impl Readable for NodeAnnouncementInfo { /// /// Since node aliases are provided by third parties, they are a potential avenue for injection /// attacks. Care must be taken when processing. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub struct NodeAlias(pub [u8; 32]); impl fmt::Display for NodeAlias { @@ -1216,6 +1240,18 @@ pub struct NodeInfo { pub announcement_info: Option } +impl NodeInfo { + /// Returns whether the node has only announced Tor addresses. 
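As shown just above, `DirectedChannelInfo` no longer caches the capacity; `effective_capacity` now derives it on demand from the stored direction and the channel's funding amount. A stand-alone restatement of that computation (the function name `directed_capacity` is hypothetical, and plain numbers stand in for the graph fields):

```rust
use lightning::routing::gossip::EffectiveCapacity;

// Mirrors DirectedChannelInfo::effective_capacity: if the funding amount is known,
// cap the advertised per-direction HTLC maximum by it; otherwise fall back to the
// advertised maximum alone.
fn directed_capacity(capacity_sats: Option<u64>, htlc_maximum_msat: u64) -> EffectiveCapacity {
	match capacity_sats.map(|sats| sats * 1000) {
		Some(capacity_msat) => EffectiveCapacity::Total {
			capacity_msat,
			htlc_maximum_msat: core::cmp::min(htlc_maximum_msat, capacity_msat),
		},
		None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
	}
}
```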
+ pub fn is_tor_only(&self) -> bool { + self.announcement_info + .as_ref() + .map(|info| info.addresses()) + .and_then(|addresses| (!addresses.is_empty()).then(|| addresses)) + .map(|addresses| addresses.iter().all(|address| address.is_tor())) + .unwrap_or(false) + } +} + impl fmt::Display for NodeInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, " channels: {:?}, announcement_info: {:?}", @@ -1282,7 +1318,7 @@ impl Writeable for NetworkGraph where L::Target: Logger { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); - self.genesis_hash.write(writer)?; + self.chain_hash.write(writer)?; let channels = self.channels.read().unwrap(); (channels.len() as u64).write(writer)?; for (ref chan_id, ref chan_info) in channels.unordered_iter() { @@ -1308,16 +1344,18 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - let genesis_hash: BlockHash = Readable::read(reader)?; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::new(); + // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. + let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; - let mut nodes = IndexedMap::new(); + // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. + let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); for _ in 0..nodes_count { let node_id = Readable::read(reader)?; let node_info = Readable::read(reader)?; @@ -1331,13 +1369,13 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { Ok(NetworkGraph { secp_ctx: Secp256k1::verification_only(), - genesis_hash, + chain_hash, logger, channels: RwLock::new(channels), nodes: RwLock::new(nodes), last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp), - removed_nodes: Mutex::new(HashMap::new()), - removed_channels: Mutex::new(HashMap::new()), + removed_nodes: Mutex::new(new_hash_map()), + removed_channels: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), }) } @@ -1367,7 +1405,7 @@ impl PartialEq for NetworkGraph where L::Target: Logger { let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) }; let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self()); let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self()); - self.genesis_hash.eq(&other.genesis_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b) + self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b) } } @@ -1376,13 +1414,13 @@ impl NetworkGraph where L::Target: Logger { pub fn new(network: Network, logger: L) -> NetworkGraph { Self { secp_ctx: Secp256k1::verification_only(), - genesis_hash: genesis_block(network).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(network), logger, channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), - 
removed_channels: Mutex::new(HashMap::new()), - removed_nodes: Mutex::new(HashMap::new()), + removed_channels: Mutex::new(new_hash_map()), + removed_nodes: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), } } @@ -1608,7 +1646,7 @@ impl NetworkGraph where L::Target: Logger { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } - if msg.chain_hash != self.genesis_hash { + if msg.chain_hash != self.chain_hash { return Err(LightningError { err: "Channel announcement chain hash does not match genesis hash".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug), @@ -1828,7 +1866,7 @@ impl NetworkGraph where L::Target: Logger { // NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal, // so we'll just set the removal time here to the current UNIX time on the very next invocation // of this function. - #[cfg(feature = "no-std")] + #[cfg(not(feature = "std"))] { let mut tracked_time = Some(current_time_unix); core::mem::swap(time, &mut tracked_time); @@ -1845,14 +1883,14 @@ impl NetworkGraph where L::Target: Logger { /// For an already known (from announcement) channel, update info about one of the directions /// of the channel. /// - /// You probably don't want to call this directly, instead relying on a P2PGossipSync's - /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. /// /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(&msg.contents, Some(&msg), Some(&msg.signature)) + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false) } /// For an already known (from announcement) channel, update info about one of the directions @@ -1862,13 +1900,26 @@ impl NetworkGraph where L::Target: Logger { /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(msg, None, None) + self.update_channel_internal(msg, None, None, false) } - fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>) -> Result<(), LightningError> { + /// For an already known (from announcement) channel, verify the given [`ChannelUpdate`]. + /// + /// This checks whether the update currently is applicable by [`Self::update_channel`]. + /// + /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or + /// materially in the future will be rejected. 
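A sketch of how a caller might pair the verification method declared just below with `update_channel`: run the same signature, chain-hash and staleness checks first, and only mutate the graph if they pass. The helper name `check_then_apply` and the `graph`/`update` values are assumptions, not part of the patch:

```rust
use core::ops::Deref;
use lightning::ln::msgs::{ChannelUpdate, LightningError};
use lightning::routing::gossip::NetworkGraph;
use lightning::util::logger::Logger;

// Verify a relayed channel_update without touching the graph, then apply it.
fn check_then_apply<L: Deref>(graph: &NetworkGraph<L>, update: &ChannelUpdate)
	-> Result<(), LightningError>
where L::Target: Logger {
	graph.verify_channel_update(update)?;
	graph.update_channel(update)
}
```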
+ pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true) + } + + fn update_channel_internal(&self, msg: &msgs::UnsignedChannelUpdate, + full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>, + only_verify: bool) -> Result<(), LightningError> + { let chan_enabled = msg.flags & (1 << 1) != (1 << 1); - if msg.chain_hash != self.genesis_hash { + if msg.chain_hash != self.chain_hash { return Err(LightningError { err: "Channel update chain hash does not match genesis hash".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug), @@ -1895,7 +1946,10 @@ impl NetworkGraph where L::Target: Logger { None => { core::mem::drop(channels); self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?; - return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}); + return Err(LightningError { + err: "Couldn't find channel for update".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Gossip), + }); }, Some(channel) => { if msg.htlc_maximum_msat > MAX_VALUE_MSAT { @@ -1952,7 +2006,7 @@ impl NetworkGraph where L::Target: Logger { } } } - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if msg.flags & 1 == 1 { check_update_latest!(channel.two_to_one); if let Some(sig) = sig { @@ -1961,7 +2015,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.two_to_one = get_new_channel_info!(); + if !only_verify { + channel.two_to_one = get_new_channel_info!(); + } } else { check_update_latest!(channel.one_to_two); if let Some(sig) = sig { @@ -1970,7 +2026,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.one_to_two = get_new_channel_info!(); + if !only_verify { + channel.one_to_two = get_new_channel_info!(); + } } } } @@ -2052,6 +2110,7 @@ pub(crate) mod tests { use crate::ln::chan_utils::make_funding_redeemscript; #[cfg(feature = "std")] use crate::ln::features::InitFeatures; + use crate::ln::msgs::SocketAddress; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo}; use crate::routing::utxo::{UtxoLookupError, UtxoResult}; use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, @@ -2059,7 +2118,7 @@ pub(crate) mod tests { ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use crate::util::config::UserConfig; use crate::util::test_utils; - use crate::util::ser::{ReadableArgs, Readable, Writeable}; + use crate::util::ser::{Hostname, ReadableArgs, Readable, Writeable}; use crate::util::scid_utils::scid_from_parts; use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS; @@ -2067,13 +2126,11 @@ pub(crate) mod tests { use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; + use bitcoin::hashes::hex::FromHex; use bitcoin::network::constants::Network; - use bitcoin::blockdata::constants::genesis_block; - use bitcoin::blockdata::script::Script; + use bitcoin::blockdata::constants::ChainHash; + use bitcoin::blockdata::script::ScriptBuf; use bitcoin::blockdata::transaction::TxOut; - - use hex; - use 
bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; @@ -2102,7 +2159,7 @@ pub(crate) mod tests { fn request_full_sync_finite_times() { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); + let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(gossip_sync.should_request_full_sync(&node_id)); assert!(gossip_sync.should_request_full_sync(&node_id)); @@ -2140,7 +2197,7 @@ pub(crate) mod tests { let mut unsigned_announcement = UnsignedChannelAnnouncement { features: channelmanager::provided_channel_features(&UserConfig::default()), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, node_id_1: NodeId::from_pubkey(&node_id_1), node_id_2: NodeId::from_pubkey(&node_id_2), @@ -2159,7 +2216,7 @@ pub(crate) mod tests { } } - pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> Script { + pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> ScriptBuf { let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), @@ -2168,7 +2225,7 @@ pub(crate) mod tests { pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { let mut unsigned_channel_update = UnsignedChannelUpdate { - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, timestamp: 100, flags: 0, @@ -2216,7 +2273,7 @@ pub(crate) mod tests { Err(_) => panic!() }; - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); match gossip_sync.handle_node_announcement( &NodeAnnouncement { signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey), @@ -2373,7 +2430,7 @@ pub(crate) mod tests { // Test that channel announcements with the wrong chain hash are ignored (network graph is testnet, // announcement is mainnet). 
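Because the graph now stores a BOLT 7 `ChainHash` (the serialized genesis block hash) instead of a `BlockHash`, cross-chain gossip such as the mainnet announcement built below is rejected with a plain comparison. A small sketch; `is_for_our_chain` and `graph_chain` are hypothetical, with `graph_chain` standing in for `NetworkGraph::get_chain_hash()`:

```rust
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;

// Gossip carrying a different chain hash than the graph's is simply ignored.
fn is_for_our_chain(graph_chain: ChainHash, msg_chain_hash: ChainHash) -> bool {
	graph_chain == msg_chain_hash
}

fn main() {
	let graph_chain = ChainHash::using_genesis_block(Network::Testnet);
	let msg_chain = ChainHash::using_genesis_block(Network::Bitcoin);
	assert!(!is_for_our_chain(graph_chain, msg_chain)); // mainnet gossip, testnet graph
}
```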
let incorrect_chain_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.chain_hash = genesis_block(Network::Bitcoin).header.block_hash(); + unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); }, node_1_privkey, node_2_privkey, &secp_ctx); match gossip_sync.handle_channel_announcement(&incorrect_chain_announcement) { Ok(_) => panic!(), @@ -2411,6 +2468,7 @@ pub(crate) mod tests { } let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); + network_graph.verify_channel_update(&valid_channel_update).unwrap(); match gossip_sync.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(res), _ => panic!(), @@ -2477,7 +2535,7 @@ pub(crate) mod tests { unsigned_channel_update.timestamp += 500; }, node_1_privkey, &secp_ctx); let zero_hash = Sha256dHash::hash(&[0; 32]); - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey); match gossip_sync.handle_channel_update(&invalid_sig_channel_update) { Ok(_) => panic!(), @@ -2487,7 +2545,7 @@ pub(crate) mod tests { // Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel // update is mainet). let incorrect_chain_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.chain_hash = genesis_block(Network::Bitcoin).header.block_hash(); + unsigned_channel_update.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); }, node_1_privkey, &secp_ctx); match gossip_sync.handle_channel_update(&incorrect_chain_update) { @@ -2513,7 +2571,8 @@ pub(crate) mod tests { let short_channel_id; { - // Announce a channel we will update + // Check we won't apply an update via `handle_network_update` for privacy reasons, but + // can continue fine if we manually apply it. 
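For event consumers the behaviour change tested below looks roughly like this sketch: `handle_network_update` now only logs `ChannelUpdateMessage`s, so a caller that still wants the enclosed update reflected in its graph must apply it explicitly and accept the privacy trade-off. The helper name `on_network_update` is hypothetical:

```rust
use core::ops::Deref;
use lightning::routing::gossip::{NetworkGraph, NetworkUpdate};
use lightning::util::logger::Logger;

fn on_network_update<L: Deref>(graph: &NetworkGraph<L>, update: &NetworkUpdate)
where L::Target: Logger {
	// Applies channel/node failures, but intentionally skips ChannelUpdateMessage.
	graph.handle_network_update(update);
	if let NetworkUpdate::ChannelUpdateMessage { msg } = update {
		// Opt back in to applying the enclosed gossip, as the test below does manually.
		let _ = graph.update_channel(msg);
	}
}
```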
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; @@ -2524,10 +2583,11 @@ pub(crate) mod tests { assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage { - msg: valid_channel_update, + msg: valid_channel_update.clone(), }); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + network_graph.update_channel(&valid_channel_update).unwrap(); } // Non-permanent failure doesn't touch the channel at all @@ -2925,7 +2985,7 @@ pub(crate) mod tests { let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); // It should ignore if gossip_queries feature is not enabled { @@ -2962,7 +3022,7 @@ pub(crate) mod tests { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); @@ -3014,13 +3074,13 @@ pub(crate) mod tests { &gossip_sync, &node_id_2, QueryChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, }, false, vec![ReplyChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, sync_complete: true, @@ -3255,7 +3315,7 @@ pub(crate) mod tests { let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let result = gossip_sync.handle_query_short_channel_ids(&node_id, QueryShortChannelIds { chain_hash, @@ -3314,16 +3374,16 @@ pub(crate) mod tests { assert_eq!(chan_update_info, read_chan_update_info); // Check the serialization hasn't changed. - let legacy_chan_update_info_with_some: Vec = hex::decode("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_some: Vec = >::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some); // Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself // or the ChannelUpdate enclosed with `last_update_message`. 
- let legacy_chan_update_info_with_some_and_fail_update: Vec = hex::decode("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); + let legacy_chan_update_info_with_some_and_fail_update: Vec = >::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_some_and_fail_update.as_slice()); assert!(read_chan_update_info_res.is_err()); - let legacy_chan_update_info_with_none: Vec = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_none: Vec = >::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice()); assert!(read_chan_update_info_res.is_err()); @@ -3365,18 +3425,18 @@ pub(crate) mod tests { assert_eq!(chan_info_some_updates, read_chan_info); // Check the serialization hasn't changed. - let legacy_chan_info_with_some: Vec = hex::decode("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_some: Vec = >::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); assert_eq!(encoded_chan_info, legacy_chan_info_with_some); // Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` / // `last_update_message` fields fail to decode due to missing htlc_maximum_msat. 
- let legacy_chan_info_with_some_and_fail_update = hex::decode("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap(); + let legacy_chan_info_with_some_and_fail_update = >::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap(); let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_some_and_fail_update.as_slice()).unwrap(); assert_eq!(read_chan_info.announcement_received_time, 87654); assert_eq!(read_chan_info.one_to_two, None); assert_eq!(read_chan_info.two_to_one, None); - let legacy_chan_info_with_none: Vec = hex::decode("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_none: Vec = >::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap(); assert_eq!(read_chan_info.announcement_received_time, 87654); assert_eq!(read_chan_info.one_to_two, None); @@ -3386,7 +3446,7 @@ pub(crate) mod tests { #[test] fn node_info_is_readable() { // 1. 
Check we can read a valid NodeAnnouncementInfo and fail on an invalid one - let announcement_message = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); + let announcement_message = >::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap(); let valid_node_ann_info = NodeAnnouncementInfo { features: channelmanager::provided_node_features(&UserConfig::default()), @@ -3402,7 +3462,7 @@ pub(crate) mod tests { assert_eq!(read_valid_node_ann_info, valid_node_ann_info); assert_eq!(read_valid_node_ann_info.addresses().len(), 1); - let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); + let encoded_invalid_node_ann_info = >::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice()); assert!(read_invalid_node_ann_info_res.is_err()); @@ -3417,14 +3477,14 @@ pub(crate) mod tests { let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap(); assert_eq!(read_valid_node_info, valid_node_info); - let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); + let encoded_invalid_node_info_hex = >::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap(); assert_eq!(read_invalid_node_info.announcement_info, None); } #[test] fn test_node_info_keeps_compatibility() { - let old_ann_info_with_addresses = hex::decode("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); + let old_ann_info_with_addresses = >::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice()) .expect("to be able to read an old NodeAnnouncementInfo with addresses"); // This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty @@ -3436,6 +3496,112 @@ pub(crate) mod tests { let node_id = NodeId([42; 33]); assert_eq!(format!("{}", &node_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"); } + + #[test] + fn is_tor_only_node() { + let network_graph = create_network_graph(); + let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); + 
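The calls this test repeats can be wrapped in a small helper when querying a synced graph from application code; a sketch only, with the hypothetical name `node_is_tor_only` (the real check is `NodeInfo::is_tor_only` added above):

```rust
use core::ops::Deref;
use lightning::routing::gossip::{NetworkGraph, NodeId};
use lightning::util::logger::Logger;

// Answer "has this node only announced Tor addresses?" for a node in the graph,
// treating unknown or unannounced nodes as not Tor-only.
fn node_is_tor_only<L: Deref>(graph: &NetworkGraph<L>, node_id: &NodeId) -> bool
where L::Target: Logger {
	graph.read_only().node(node_id).map_or(false, |node| node.is_tor_only())
}
```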
+ let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); + let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); + let node_1_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey)); + + let announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + gossip_sync.handle_channel_announcement(&announcement).unwrap(); + + let tcp_ip_v4 = SocketAddress::TcpIpV4 { + addr: [255, 254, 253, 252], + port: 9735 + }; + let tcp_ip_v6 = SocketAddress::TcpIpV6 { + addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240], + port: 9735 + }; + let onion_v2 = SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]); + let onion_v3 = SocketAddress::OnionV3 { + ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224], + checksum: 32, + version: 16, + port: 9735 + }; + let hostname = SocketAddress::Hostname { + hostname: Hostname::try_from(String::from("host")).unwrap(), + port: 9735, + }; + + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![ + tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone(), + hostname.clone() + ]; + announcement.timestamp += 1000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![ + tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone() + ]; + announcement.timestamp += 2000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![ + tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone() + ]; + announcement.timestamp += 3000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()]; + announcement.timestamp += 4000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![onion_v2.clone()]; + announcement.timestamp += 5000; + }, + node_1_privkey, &secp_ctx + ); + gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + + let announcement = get_signed_node_announcement( + |announcement| { + announcement.addresses = vec![tcp_ip_v4.clone()]; + announcement.timestamp += 6000; + }, + node_1_privkey, &secp_ctx + ); + 
gossip_sync.handle_node_announcement(&announcement).unwrap(); + assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only()); + } } #[cfg(ldk_bench)]
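For reference, the `NodeId` additions in this patch (`from_slice`, `as_array`, and the reworked `FromStr` impl) compose roughly as follows. This is a sketch: `node_id_round_trip` is a hypothetical helper and `bytes` is assumed to hold a 33-byte compressed public key.

```rust
use core::str::FromStr;
use lightning::ln::msgs::DecodeError;
use lightning::routing::gossip::NodeId;

fn node_id_round_trip(bytes: &[u8]) -> Result<(), DecodeError> {
	let id = NodeId::from_slice(bytes)?;      // fails unless exactly 33 bytes long
	let arr: &[u8; 33] = id.as_array();       // new fixed-size accessor
	assert_eq!(&arr[..], id.as_slice());
	// Display writes lowercase hex, and the FromStr impl accepts it back.
	let parsed = NodeId::from_str(&format!("{}", id)).expect("hex round-trip");
	assert_eq!(parsed, id);
	Ok(())
}
```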