X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fgossip.rs;h=c5c08cf40321185a864aee4e814d39cc62f91abd;hb=fbaa3c4855ec3ebfb62fe91437f4237582db2577;hp=b72b49978fd8cef1bce5b3cd8a969ae847021453;hpb=1fd95496d1038352535a74065d2b06559e2192fb;p=rust-lightning diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index b72b4997..c5c08cf4 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! The top-level network map tracking logic lives here. +//! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; use bitcoin::secp256k1::PublicKey; @@ -16,20 +16,21 @@ use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; -use bitcoin::blockdata::transaction::TxOut; +use bitcoin::hashes::hex::FromHex; use bitcoin::hash_types::BlockHash; -use crate::chain; -use crate::chain::Access; -use crate::ln::chan_utils::make_funding_redeemscript_from_slices; +use bitcoin::network::constants::Network; +use bitcoin::blockdata::constants::genesis_block; + +use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter}; use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use crate::ln::msgs; +use crate::routing::utxo::{self, UtxoLookup, UtxoResolver}; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable}; use crate::util::logger::{Logger, Level}; -use crate::util::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; use crate::util::string::PrintableString; use crate::util::indexed_map::{IndexedMap, Entry as IndexedMapEntry}; @@ -38,12 +39,13 @@ use crate::io; use crate::io_extras::{copy, sink}; use crate::prelude::*; use core::{cmp, fmt}; +use core::convert::TryFrom; use crate::sync::{RwLock, RwLockReadGuard}; #[cfg(feature = "std")] use core::sync::atomic::{AtomicUsize, Ordering}; use crate::sync::Mutex; use core::ops::{Bound, Deref}; -use bitcoin::hashes::hex::ToHex; +use core::str::FromStr; #[cfg(feature = "std")] use std::time::{SystemTime, UNIX_EPOCH}; @@ -77,6 +79,11 @@ impl NodeId { pub fn as_slice(&self) -> &[u8] { &self.0 } + + /// Get the public key from this NodeId + pub fn as_pubkey(&self) -> Result { + PublicKey::from_slice(&self.0) + } } impl fmt::Debug for NodeId { @@ -131,6 +138,29 @@ impl Readable for NodeId { } } +impl From for NodeId { + fn from(pubkey: PublicKey) -> Self { + Self::from_pubkey(&pubkey) + } +} + +impl TryFrom for PublicKey { + type Error = secp256k1::Error; + + fn try_from(node_id: NodeId) -> Result { + node_id.as_pubkey() + } +} + +impl FromStr for NodeId { + type Err = bitcoin::hashes::hex::Error; + + fn from_str(s: &str) -> Result { + let data: [u8; PUBLIC_KEY_SIZE] = FromHex::from_hex(s)?; + Ok(NodeId(data)) + } +} + /// Represents the network as nodes and channels between them pub struct NetworkGraph where L::Target: Logger { secp_ctx: Secp256k1, @@ -159,6 +189,8 @@ pub struct NetworkGraph where 
L::Target: Logger { /// resync them from gossip. Each `NodeId` is mapped to the time (in seconds) it was removed so /// that once some time passes, we can potentially resync it from gossip again. removed_nodes: Mutex>>, + /// Announcement messages which are awaiting an on-chain lookup to be processed. + pub(super) pending_checks: utxo::PendingChecks, } /// A read-only view of [`NetworkGraph`]. @@ -180,7 +212,7 @@ pub enum NetworkUpdate { msg: ChannelUpdate, }, /// An error indicating that a channel failed to route a payment, which should be applied via - /// [`NetworkGraph::channel_failed`]. + /// [`NetworkGraph::channel_failed_permanent`] if permanent. ChannelFailure { /// The short channel id of the closed channel. short_channel_id: u64, @@ -218,31 +250,30 @@ impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate, /// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. -pub struct P2PGossipSync>, C: Deref, L: Deref> -where C::Target: chain::Access, L::Target: Logger +pub struct P2PGossipSync>, U: Deref, L: Deref> +where U::Target: UtxoLookup, L::Target: Logger { network_graph: G, - chain_access: Option, + utxo_lookup: Option, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize, pending_events: Mutex>, logger: L, } -impl>, C: Deref, L: Deref> P2PGossipSync -where C::Target: chain::Access, L::Target: Logger +impl>, U: Deref, L: Deref> P2PGossipSync +where U::Target: UtxoLookup, L::Target: Logger { /// Creates a new tracker of the actual state of the network of channels and nodes, - /// assuming an existing Network Graph. - /// Chain monitor is used to make sure announced channels exist on-chain, - /// channel data is correct, and that the announcement is signed with - /// channel owners' keys. - pub fn new(network_graph: G, chain_access: Option, logger: L) -> Self { + /// assuming an existing [`NetworkGraph`]. + /// UTXO lookup is used to make sure announced channels exist on-chain, channel data is + /// correct, and the announcement is signed with channel owners' keys. + pub fn new(network_graph: G, utxo_lookup: Option, logger: L) -> Self { P2PGossipSync { network_graph, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize::new(0), - chain_access, + utxo_lookup, pending_events: Mutex::new(vec![]), logger, } @@ -251,14 +282,14 @@ where C::Target: chain::Access, L::Target: Logger /// Adds a provider used to check new announcements. Does not affect /// existing announcements unless they are updated. /// Add, update or remove the provider would replace the current one. - pub fn add_chain_access(&mut self, chain_access: Option) { - self.chain_access = chain_access; + pub fn add_utxo_lookup(&mut self, utxo_lookup: Option) { + self.utxo_lookup = utxo_lookup; } /// Gets a reference to the underlying [`NetworkGraph`] which was provided in /// [`P2PGossipSync::new`]. /// - /// (C-not exported) as bindings don't support a reference-to-a-reference yet + /// This is not exported to bindings users as bindings don't support a reference-to-a-reference yet pub fn network_graph(&self) -> &G { &self.network_graph } @@ -275,12 +306,42 @@ where C::Target: chain::Access, L::Target: Logger false } } + + /// Used to broadcast forward gossip messages which were validated async. + /// + /// Note that this will ignore events other than `Broadcast*` or messages with too much excess + /// data. 
+ pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) { + match &mut ev { + MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { return; } + if update_msg.as_ref() + .map(|msg| msg.contents.excess_data.len()).unwrap_or(0) > MAX_EXCESS_BYTES_FOR_RELAY + { + *update_msg = None; + } + }, + MessageSendEvent::BroadcastChannelUpdate { msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { return; } + }, + MessageSendEvent::BroadcastNodeAnnouncement { msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY || + msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY || + msg.contents.excess_data.len() + msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY + { + return; + } + }, + _ => return, + } + self.pending_events.lock().unwrap().push(ev); + } } impl NetworkGraph where L::Target: Logger { /// Handles any network updates originating from [`Event`]s. /// - /// [`Event`]: crate::util::events::Event + /// [`Event`]: crate::events::Event pub fn handle_network_update(&self, network_update: &NetworkUpdate) { match *network_update { NetworkUpdate::ChannelUpdateMessage { ref msg } => { @@ -291,9 +352,10 @@ impl NetworkGraph where L::Target: Logger { let _ = self.update_channel(msg); }, NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { - let action = if is_permanent { "Removing" } else { "Disabling" }; - log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", action, short_channel_id); - self.channel_failed(short_channel_id, is_permanent); + if is_permanent { + log_debug!(self.logger, "Removing channel graph entry for {} due to a payment failure.", short_channel_id); + self.channel_failed_permanent(short_channel_id); + } }, NetworkUpdate::NodeFailure { ref node_id, is_permanent } => { if is_permanent { @@ -342,8 +404,8 @@ macro_rules! get_pubkey_from_node_id { } } -impl>, C: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync -where C::Target: chain::Access, L::Target: Logger +impl>, U: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync +where U::Target: UtxoLookup, L::Target: Logger { fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result { self.network_graph.update_node_from_announcement(msg)?; @@ -353,8 +415,7 @@ where C::Target: chain::Access, L::Target: Logger } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { - self.network_graph.update_channel_from_announcement(msg, &self.chain_access)?; - log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }); + self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } @@ -364,7 +425,7 @@ where C::Target: chain::Access, L::Target: Logger } fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option, Option)> { - let channels = self.network_graph.channels.read().unwrap(); + let mut channels = self.network_graph.channels.write().unwrap(); for (_, ref chan) in channels.range(starting_point..) 
{ if chan.announcement_message.is_some() { let chan_announcement = chan.announcement_message.clone().unwrap(); @@ -385,10 +446,10 @@ where C::Target: chain::Access, L::Target: Logger None } - fn get_next_node_announcement(&self, starting_point: Option<&PublicKey>) -> Option { - let nodes = self.network_graph.nodes.read().unwrap(); - let iter = if let Some(pubkey) = starting_point { - nodes.range((Bound::Excluded(NodeId::from_pubkey(pubkey)), Bound::Unbounded)) + fn get_next_node_announcement(&self, starting_point: Option<&NodeId>) -> Option { + let mut nodes = self.network_graph.nodes.write().unwrap(); + let iter = if let Some(node_id) = starting_point { + nodes.range((Bound::Excluded(node_id), Bound::Unbounded)) } else { nodes.range(..) }; @@ -403,15 +464,21 @@ where C::Target: chain::Access, L::Target: Logger } /// Initiates a stateless sync of routing gossip information with a peer - /// using gossip_queries. The default strategy used by this implementation + /// using [`gossip_queries`]. The default strategy used by this implementation /// is to sync the full block range with several peers. /// - /// We should expect one or more reply_channel_range messages in response - /// to our query_channel_range. Each reply will enqueue a query_scid message + /// We should expect one or more [`reply_channel_range`] messages in response + /// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message /// to request gossip messages for each channel. The sync is considered complete - /// when the final reply_scids_end message is received, though we are not + /// when the final [`reply_scids_end`] message is received, though we are not /// tracking this directly. - fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) -> Result<(), ()> { + /// + /// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages + /// [`reply_channel_range`]: msgs::ReplyChannelRange + /// [`query_channel_range`]: msgs::QueryChannelRange + /// [`query_scid`]: msgs::QueryShortChannelIds + /// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd + fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> { // We will only perform a sync with peers that support gossip_queries. if !init_msg.features.supports_gossip_queries() { // Don't disconnect peers for not supporting gossip queries. We may wish to have @@ -546,7 +613,7 @@ where C::Target: chain::Access, L::Target: Logger // (has at least one update). A peer may still want to know the channel // exists even if its not yet routable. 
let mut batches: Vec> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)]; - let channels = self.network_graph.channels.read().unwrap(); + let mut channels = self.network_graph.channels.write().unwrap(); for (_, ref chan) in channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) { if let Some(chan_announcement) = &chan.announcement_message { // Construct a new batch if last one is full @@ -630,11 +697,15 @@ where C::Target: chain::Access, L::Target: Logger features.set_gossip_queries_optional(); features } + + fn processing_queue_high(&self) -> bool { + self.network_graph.pending_checks.too_many_checks_pending() + } } -impl>, C: Deref, L: Deref> MessageSendEventsProvider for P2PGossipSync +impl>, U: Deref, L: Deref> MessageSendEventsProvider for P2PGossipSync where - C::Target: chain::Access, + U::Target: UtxoLookup, L::Target: Logger, { fn get_and_clear_pending_msg_events(&self) -> Vec { @@ -853,9 +924,9 @@ impl Readable for ChannelInfo { (0, features, required), (1, announcement_received_time, (default_value, 0)), (2, node_one, required), - (4, one_to_two_wrap, ignorable), + (4, one_to_two_wrap, upgradable_option), (6, node_two, required), - (8, two_to_one_wrap, ignorable), + (8, two_to_one_wrap, upgradable_option), (10, capacity_sats, required), (12, announcement_message, required), }); @@ -938,7 +1009,7 @@ impl<'a> fmt::Debug for DirectedChannelInfo<'a> { /// /// While this may be smaller than the actual channel capacity, amounts greater than /// [`Self::as_msat`] should not be routed through the channel. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub enum EffectiveCapacity { /// The available liquidity in the channel known from being a channel counterparty, and thus a /// direct hop. @@ -985,9 +1056,9 @@ impl EffectiveCapacity { } /// Fees for routing via a given channel or a node -#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] +#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash, Ord, PartialOrd)] pub struct RoutingFees { - /// Flat routing fee in satoshis + /// Flat routing fee in millisatoshis. pub base_msat: u32, /// Liquidity-based routing fee in millionths of a routed amount. /// In other words, 10000 is 1%. @@ -1013,8 +1084,6 @@ pub struct NodeAnnouncementInfo { /// May be invalid or malicious (eg control chars), /// should not be exposed to the user. pub alias: NodeAlias, - /// Internet-level addresses via which one can connect to the node - pub addresses: Vec, /// An initial announcement of the node /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. 
@@ -1022,20 +1091,51 @@ pub struct NodeAnnouncementInfo { pub announcement_message: Option } -impl_writeable_tlv_based!(NodeAnnouncementInfo, { - (0, features, required), - (2, last_update, required), - (4, rgb, required), - (6, alias, required), - (8, announcement_message, option), - (10, addresses, vec_type), -}); +impl NodeAnnouncementInfo { + /// Internet-level addresses via which one can connect to the node + pub fn addresses(&self) -> &[NetAddress] { + self.announcement_message.as_ref() + .map(|msg| msg.contents.addresses.as_slice()) + .unwrap_or_default() + } +} + +impl Writeable for NodeAnnouncementInfo { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + let empty_addresses = Vec::::new(); + write_tlv_fields!(writer, { + (0, self.features, required), + (2, self.last_update, required), + (4, self.rgb, required), + (6, self.alias, required), + (8, self.announcement_message, option), + (10, empty_addresses, vec_type), // Versions prior to 0.0.115 require this field + }); + Ok(()) + } +} + +impl Readable for NodeAnnouncementInfo { + fn read(reader: &mut R) -> Result { + _init_and_read_tlv_fields!(reader, { + (0, features, required), + (2, last_update, required), + (4, rgb, required), + (6, alias, required), + (8, announcement_message, option), + (10, _addresses, vec_type), // deprecated, not used anymore + }); + let _: Option> = _addresses; + Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(), + alias: alias.0.unwrap(), announcement_message }) + } +} /// A user-defined name for a node, which may be used when displaying the node in a graph. /// /// Since node aliases are provided by third parties, they are a potential avenue for injection /// attacks. Care must be taken when processing. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct NodeAlias(pub [u8; 32]); impl fmt::Display for NodeAlias { @@ -1100,7 +1200,7 @@ impl Writeable for NodeInfo { } } -// A wrapper allowing for the optional deseralization of `NodeAnnouncementInfo`. Utilizing this is +// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is // necessary to maintain compatibility with previous serializations of `NetAddress` that have an // invalid hostname set. We ignore and eat all errors until we are either able to read a // `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end. @@ -1131,7 +1231,7 @@ impl Readable for NodeInfo { read_tlv_fields!(reader, { (0, _lowest_inbound_channel_fees, option), - (2, announcement_info_wrap, ignorable), + (2, announcement_info_wrap, upgradable_option), (4, channels, vec_type), }); @@ -1205,6 +1305,7 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp), removed_nodes: Mutex::new(HashMap::new()), removed_channels: Mutex::new(HashMap::new()), + pending_checks: utxo::PendingChecks::new(), }) } } @@ -1234,16 +1335,17 @@ impl PartialEq for NetworkGraph where L::Target: Logger { impl NetworkGraph where L::Target: Logger { /// Creates a new, empty, network graph. 
- pub fn new(genesis_hash: BlockHash, logger: L) -> NetworkGraph { + pub fn new(network: Network, logger: L) -> NetworkGraph { Self { secp_ctx: Secp256k1::verification_only(), - genesis_hash, + genesis_hash: genesis_block(network).header.block_hash(), logger, channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), removed_channels: Mutex::new(HashMap::new()), removed_nodes: Mutex::new(HashMap::new()), + pending_checks: utxo::PendingChecks::new(), } } @@ -1286,7 +1388,7 @@ impl NetworkGraph where L::Target: Logger { /// routing messages from a source using a protocol other than the lightning P2P protocol. pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<(), LightningError> { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id, "node_announcement"); + secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) } @@ -1299,8 +1401,13 @@ impl NetworkGraph where L::Target: Logger { } fn update_node_from_announcement_intern(&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> { - match self.nodes.write().unwrap().get_mut(&NodeId::from_pubkey(&msg.node_id)) { - None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}), + let mut nodes = self.nodes.write().unwrap(); + match nodes.get_mut(&msg.node_id) { + None => { + core::mem::drop(nodes); + self.pending_checks.check_hold_pending_node_announcement(msg, full_msg)?; + Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}) + }, Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { // The timestamp field is somewhat of a misnomer - the BOLTs use it to order @@ -1321,8 +1428,7 @@ impl NetworkGraph where L::Target: Logger { features: msg.features.clone(), last_update: msg.timestamp, rgb: msg.rgb, - alias: NodeAlias(msg.alias), - addresses: msg.addresses.clone(), + alias: msg.alias, announcement_message: if should_relay { full_msg.cloned() } else { None }, }); @@ -1333,39 +1439,52 @@ impl NetworkGraph where L::Target: Logger { /// Store or update channel info from a channel announcement. /// - /// You probably don't want to call this directly, instead relying on a P2PGossipSync's - /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. /// - /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify + /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. 
- pub fn update_channel_from_announcement( - &self, msg: &msgs::ChannelAnnouncement, chain_access: &Option, + pub fn update_channel_from_announcement( + &self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option, ) -> Result<(), LightningError> where - C::Target: chain::Access, + U::Target: UtxoLookup, { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement"); - self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), chain_access) + self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup) + } + + /// Store or update channel info from a channel announcement. + /// + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept + /// routing messages from a source using a protocol other than the lightning P2P protocol. + /// + /// This will skip verification of if the channel is actually on-chain. + pub fn update_channel_from_announcement_no_lookup( + &self, msg: &ChannelAnnouncement + ) -> Result<(), LightningError> { + self.update_channel_from_announcement::<&UtxoResolver>(msg, &None) } /// Store or update channel info from a channel announcement without verifying the associated /// signatures. Because we aren't given the associated signatures here we cannot relay the /// channel announcement to any of our peers. /// - /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify + /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. 
- pub fn update_channel_from_unsigned_announcement( - &self, msg: &msgs::UnsignedChannelAnnouncement, chain_access: &Option + pub fn update_channel_from_unsigned_announcement( + &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option ) -> Result<(), LightningError> where - C::Target: chain::Access, + U::Target: UtxoLookup, { - self.update_channel_from_unsigned_announcement_intern(msg, None, chain_access) + self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup) } /// Update channel from partial announcement data received via rapid gossip sync @@ -1444,16 +1563,23 @@ impl NetworkGraph where L::Target: Logger { Ok(()) } - fn update_channel_from_unsigned_announcement_intern( - &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option + fn update_channel_from_unsigned_announcement_intern( + &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option ) -> Result<(), LightningError> where - C::Target: chain::Access, + U::Target: UtxoLookup, { if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } + if msg.chain_hash != self.genesis_hash { + return Err(LightningError { + err: "Channel announcement chain hash does not match genesis hash".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } + { let channels = self.channels.read().unwrap(); @@ -1476,7 +1602,7 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreDuplicateGossip }); } - } else if chain_access.is_none() { + } else if utxo_lookup.is_none() { // Similarly, if we can't check the chain right now anyway, ignore the // duplicate announcement without bothering to take the channels write lock. 
return Err(LightningError { @@ -1499,32 +1625,8 @@ impl NetworkGraph where L::Target: Logger { } } - let utxo_value = match &chain_access { - &None => { - // Tentatively accept, potentially exposing us to DoS attacks - None - }, - &Some(ref chain_access) => { - match chain_access.get_utxo(&msg.chain_hash, msg.short_channel_id) { - Ok(TxOut { value, script_pubkey }) => { - let expected_script = - make_funding_redeemscript_from_slices(msg.bitcoin_key_1.as_slice(), msg.bitcoin_key_2.as_slice()).to_v0_p2wsh(); - if script_pubkey != expected_script { - return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", expected_script.to_hex(), script_pubkey.to_hex()), action: ErrorAction::IgnoreError}); - } - //TODO: Check if value is worth storing, use it to inform routing, and compare it - //to the new HTLC max field in channel_update - Some(value) - }, - Err(chain::AccessError::UnknownChain) => { - return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError}); - }, - Err(chain::AccessError::UnknownTx) => { - return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError}); - }, - } - }, - }; + let utxo_value = self.pending_checks.check_channel_announcement( + utxo_lookup, msg, full_msg)?; #[allow(unused_mut, unused_assignments)] let mut announcement_received_time = 0; @@ -1545,43 +1647,33 @@ impl NetworkGraph where L::Target: Logger { announcement_received_time, }; - self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value) + self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?; + + log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.short_channel_id, if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }); + Ok(()) } - /// Marks a channel in the graph as failed if a corresponding HTLC fail was sent. - /// If permanent, removes a channel from the local storage. - /// May cause the removal of nodes too, if this was their last channel. - /// If not permanent, makes channels unavailable for routing. - pub fn channel_failed(&self, short_channel_id: u64, is_permanent: bool) { + /// Marks a channel in the graph as failed permanently. + /// + /// The channel and any node for which this was their last channel are removed from the graph. + pub fn channel_failed_permanent(&self, short_channel_id: u64) { #[cfg(feature = "std")] let current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs()); #[cfg(not(feature = "std"))] let current_time_unix = None; - self.channel_failed_with_time(short_channel_id, is_permanent, current_time_unix) + self.channel_failed_permanent_with_time(short_channel_id, current_time_unix) } - /// Marks a channel in the graph as failed if a corresponding HTLC fail was sent. - /// If permanent, removes a channel from the local storage. - /// May cause the removal of nodes too, if this was their last channel. - /// If not permanent, makes channels unavailable for routing. - fn channel_failed_with_time(&self, short_channel_id: u64, is_permanent: bool, current_time_unix: Option) { + /// Marks a channel in the graph as failed permanently. + /// + /// The channel and any node for which this was their last channel are removed from the graph. 
+ fn channel_failed_permanent_with_time(&self, short_channel_id: u64, current_time_unix: Option) { let mut channels = self.channels.write().unwrap(); - if is_permanent { - if let Some(chan) = channels.remove(&short_channel_id) { - let mut nodes = self.nodes.write().unwrap(); - self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix); - Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); - } - } else { - if let Some(chan) = channels.get_mut(&short_channel_id) { - if let Some(one_to_two) = chan.one_to_two.as_mut() { - one_to_two.enabled = false; - } - if let Some(two_to_one) = chan.two_to_one.as_mut() { - two_to_one.enabled = false; - } - } + if let Some(chan) = channels.remove(&short_channel_id) { + let mut nodes = self.nodes.write().unwrap(); + self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix); + Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); } } @@ -1734,6 +1826,13 @@ impl NetworkGraph where L::Target: Logger { fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>) -> Result<(), LightningError> { let chan_enabled = msg.flags & (1 << 1) != (1 << 1); + if msg.chain_hash != self.genesis_hash { + return Err(LightningError { + err: "Channel update chain hash does not match genesis hash".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } + #[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))] { // Note that many tests rely on being able to set arbitrarily old timestamps, thus we @@ -1749,7 +1848,11 @@ impl NetworkGraph where L::Target: Logger { let mut channels = self.channels.write().unwrap(); match channels.get_mut(&msg.short_channel_id) { - None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}), + None => { + core::mem::drop(channels); + self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?; + return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}); + }, Some(channel) => { if msg.htlc_maximum_msat > MAX_VALUE_MSAT { return Err(LightningError{err: @@ -1855,7 +1958,7 @@ impl NetworkGraph where L::Target: Logger { impl ReadOnlyNetworkGraph<'_> { /// Returns all known valid channels' short ids along with announced channel info. /// - /// (C-not exported) because we don't want to return lifetime'd references + /// This is not exported to bindings users because we don't want to return lifetime'd references pub fn channels(&self) -> &IndexedMap { &*self.channels } @@ -1873,7 +1976,7 @@ impl ReadOnlyNetworkGraph<'_> { /// Returns all known nodes' public keys along with announced node info. /// - /// (C-not exported) because we don't want to return lifetime'd references + /// This is not exported to bindings users because we don't want to return lifetime'd references pub fn nodes(&self) -> &IndexedMap { &*self.nodes } @@ -1893,30 +1996,26 @@ impl ReadOnlyNetworkGraph<'_> { /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. 
pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { - if let Some(node) = self.nodes.get(&NodeId::from_pubkey(&pubkey)) { - if let Some(node_info) = node.announcement_info.as_ref() { - return Some(node_info.addresses.clone()) - } - } - None + self.nodes.get(&NodeId::from_pubkey(&pubkey)) + .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec())) } } #[cfg(test)] -mod tests { - use crate::chain; +pub(crate) mod tests { + use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::channelmanager; use crate::ln::chan_utils::make_funding_redeemscript; #[cfg(feature = "std")] use crate::ln::features::InitFeatures; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo}; + use crate::routing::utxo::{UtxoLookupError, UtxoResult}; use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use crate::util::config::UserConfig; use crate::util::test_utils; - use crate::util::ser::{ReadableArgs, Writeable}; - use crate::util::events::{MessageSendEvent, MessageSendEventsProvider}; + use crate::util::ser::{ReadableArgs, Readable, Writeable}; use crate::util::scid_utils::scid_from_parts; use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS; @@ -1940,9 +2039,8 @@ mod tests { use crate::sync::Arc; fn create_network_graph() -> NetworkGraph> { - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); let logger = Arc::new(test_utils::TestLogger::new()); - NetworkGraph::new(genesis_hash, logger) + NetworkGraph::new(Network::Testnet, logger) } fn create_gossip_sync(network_graph: &NetworkGraph>) -> ( @@ -1970,14 +2068,14 @@ mod tests { assert!(!gossip_sync.should_request_full_sync(&node_id)); } - fn get_signed_node_announcement(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> NodeAnnouncement { - let node_id = PublicKey::from_secret_key(&secp_ctx, node_key); + pub(crate) fn get_signed_node_announcement(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> NodeAnnouncement { + let node_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_key)); let mut unsigned_announcement = UnsignedNodeAnnouncement { features: channelmanager::provided_node_features(&UserConfig::default()), timestamp: 100, - node_id: node_id, + node_id, rgb: [0; 3], - alias: [0; 32], + alias: NodeAlias([0; 32]), addresses: Vec::new(), excess_address_data: Vec::new(), excess_data: Vec::new(), @@ -1990,7 +2088,7 @@ mod tests { } } - fn get_signed_channel_announcement(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelAnnouncement { + pub(crate) fn get_signed_channel_announcement(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelAnnouncement { let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key); let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); @@ -2017,14 +2115,14 @@ mod tests { } } - fn get_channel_script(secp_ctx: &Secp256k1) -> Script { + pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> Script { let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); 
make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), &PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_v0_p2wsh() } - fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { + pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { let mut unsigned_channel_update = UnsignedChannelUpdate { chain_hash: genesis_block(Network::Testnet).header.block_hash(), short_channel_id: 0, @@ -2117,8 +2215,7 @@ mod tests { let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); // Test if the UTXO lookups were not supported - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let network_graph = NetworkGraph::new(genesis_hash, &logger); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); let mut gossip_sync = P2PGossipSync::new(&network_graph, None, &logger); match gossip_sync.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), @@ -2141,8 +2238,8 @@ mod tests { // Test if an associated transaction were not on-chain (or not confirmed). let chain_source = test_utils::TestChainSource::new(Network::Testnet); - *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx); - let network_graph = NetworkGraph::new(genesis_hash, &logger); + *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger); let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { @@ -2154,7 +2251,8 @@ mod tests { }; // Now test if the transaction is found in the UTXO set and the script is correct. - *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script.clone() }); + *chain_source.utxo_ret.lock().unwrap() = + UtxoResult::Sync(Ok(TxOut { value: 0, script_pubkey: good_script.clone() })); let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { unsigned_announcement.short_channel_id += 2; }, node_1_privkey, node_2_privkey, &secp_ctx); @@ -2172,7 +2270,8 @@ mod tests { // If we receive announcement for the same channel, once we've validated it against the // chain, we simply ignore all new (duplicate) announcements. - *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script }); + *chain_source.utxo_ret.lock().unwrap() = + UtxoResult::Sync(Ok(TxOut { value: 0, script_pubkey: good_script })); match gossip_sync.handle_channel_announcement(&valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Already have chain-validated channel") @@ -2226,6 +2325,16 @@ mod tests { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself") }; + + // Test that channel announcements with the wrong chain hash are ignored (network graph is testnet, + // announcement is mainnet). 
+ let incorrect_chain_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.chain_hash = genesis_block(Network::Bitcoin).header.block_hash(); + }, node_1_privkey, node_2_privkey, &secp_ctx); + match gossip_sync.handle_channel_announcement(&incorrect_chain_announcement) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash") + }; } #[test] @@ -2233,8 +2342,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let logger = test_utils::TestLogger::new(); let chain_source = test_utils::TestChainSource::new(Network::Testnet); - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let network_graph = NetworkGraph::new(genesis_hash, &logger); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); @@ -2246,7 +2354,8 @@ mod tests { { // Announce a channel we will update let good_script = get_channel_script(&secp_ctx); - *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() }); + *chain_source.utxo_ret.lock().unwrap() = + UtxoResult::Sync(Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() })); let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; @@ -2330,13 +2439,23 @@ mod tests { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message") }; + + // Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel + // update is mainet). 
+ let incorrect_chain_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.chain_hash = genesis_block(Network::Bitcoin).header.block_hash(); + }, node_1_privkey, &secp_ctx); + + match gossip_sync.handle_channel_update(&incorrect_chain_update) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash") + }; } #[test] fn handling_network_update() { let logger = test_utils::TestLogger::new(); - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let network_graph = NetworkGraph::new(genesis_hash, &logger); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); let secp_ctx = Secp256k1::new(); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); @@ -2367,7 +2486,7 @@ mod tests { assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); } - // Non-permanent closing just disables a channel + // Non-permanent failure doesn't touch the channel at all { match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), @@ -2384,7 +2503,7 @@ mod tests { match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { - assert!(!channel_info.one_to_two.as_ref().unwrap().enabled); + assert!(channel_info.one_to_two.as_ref().unwrap().enabled); } }; } @@ -2401,7 +2520,7 @@ mod tests { { // Get a new network graph since we don't want to track removed nodes in this test with "std" - let network_graph = NetworkGraph::new(genesis_hash, &logger); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); // Announce a channel to test permanent node failure let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); @@ -2436,8 +2555,7 @@ mod tests { // Test the removal of channels with `remove_stale_channels_and_tracking`. let logger = test_utils::TestLogger::new(); let chain_source = test_utils::TestChainSource::new(Network::Testnet); - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let network_graph = NetworkGraph::new(genesis_hash, &logger); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger); let secp_ctx = Secp256k1::new(); @@ -2519,7 +2637,7 @@ mod tests { // Mark the channel as permanently failed. This will also remove the two nodes // and all of the entries will be tracked as removed. - network_graph.channel_failed_with_time(short_channel_id, true, Some(tracking_time)); + network_graph.channel_failed_permanent_with_time(short_channel_id, Some(tracking_time)); // Should not remove from tracking if insufficient time has passed network_graph.remove_stale_channels_and_tracking_with_time( @@ -2552,7 +2670,7 @@ mod tests { // Mark the channel as permanently failed. This will also remove the two nodes // and all of the entries will be tracked as removed. - network_graph.channel_failed(short_channel_id, true); + network_graph.channel_failed_permanent(short_channel_id); // The first time we call the following, the channel will have a removal time assigned. 
network_graph.remove_stale_channels_and_tracking_with_time(removal_time); @@ -2652,7 +2770,7 @@ mod tests { let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); + let node_id_1 = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey)); // No nodes yet. let next_announcements = gossip_sync.get_next_node_announcement(None); @@ -2768,7 +2886,7 @@ mod tests { // It should ignore if gossip_queries feature is not enabled { let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None }; - gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap(); + gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); } @@ -2778,7 +2896,7 @@ mod tests { let mut features = InitFeatures::empty(); features.set_gossip_queries_optional(); let init_msg = Init { features, remote_network_address: None }; - gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap(); + gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { @@ -3223,26 +3341,25 @@ mod tests { #[test] fn node_info_is_readable() { - use std::convert::TryFrom; - // 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one - let valid_netaddr = crate::ln::msgs::NetAddress::Hostname { hostname: crate::util::ser::Hostname::try_from("A".to_string()).unwrap(), port: 1234 }; + let announcement_message = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); + let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap(); let valid_node_ann_info = NodeAnnouncementInfo { features: channelmanager::provided_node_features(&UserConfig::default()), last_update: 0, rgb: [0u8; 3], alias: NodeAlias([0u8; 32]), - addresses: vec![valid_netaddr], - announcement_message: None, + announcement_message: Some(announcement_message) }; let mut encoded_valid_node_ann_info = Vec::new(); assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok()); - let read_valid_node_ann_info: NodeAnnouncementInfo = crate::util::ser::Readable::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap(); + let read_valid_node_ann_info = NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap(); assert_eq!(read_valid_node_ann_info, valid_node_ann_info); + assert_eq!(read_valid_node_ann_info.addresses().len(), 1); let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); - let read_invalid_node_ann_info_res: Result = crate::util::ser::Readable::read(&mut encoded_invalid_node_ann_info.as_slice()); + let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice()); assert!(read_invalid_node_ann_info_res.is_err()); // 2. 
Check we can read a NodeInfo anyways, but set the NodeAnnouncementInfo to None if invalid @@ -3253,13 +3370,22 @@ mod tests { let mut encoded_valid_node_info = Vec::new(); assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok()); - let read_valid_node_info: NodeInfo = crate::util::ser::Readable::read(&mut encoded_valid_node_info.as_slice()).unwrap(); + let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap(); assert_eq!(read_valid_node_info, valid_node_info); let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); - let read_invalid_node_info: NodeInfo = crate::util::ser::Readable::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap(); + let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap(); assert_eq!(read_invalid_node_info.announcement_info, None); } + + #[test] + fn test_node_info_keeps_compatibility() { + let old_ann_info_with_addresses = hex::decode("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); + let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice()) + .expect("to be able to read an old NodeAnnouncementInfo with addresses"); + // This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty + assert!(ann_info_with_addresses.addresses().is_empty()); + } } #[cfg(all(test, feature = "_bench_unstable"))]
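// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff above): how a caller might adapt to the
// constructor changes in this patch. The function name `wire_up_gossip_sync`, the
// `Network::Testnet` choice and the short channel id `42` are placeholders; the
// import paths assume the public `lightning` crate layout matching the `crate::`
// paths used in the diff.

use bitcoin::network::constants::Network;
use lightning::routing::gossip::{NetworkGraph, NetworkUpdate, P2PGossipSync};
use lightning::routing::utxo::UtxoLookup;
use lightning::util::logger::Logger;

fn wire_up_gossip_sync<L: Logger, U: UtxoLookup>(logger: &L, utxo_source: &U) {
	// `NetworkGraph::new` now takes a `Network` and derives the genesis hash itself
	// instead of accepting a pre-computed `BlockHash`.
	let graph = NetworkGraph::new(Network::Testnet, logger);

	// `P2PGossipSync::new` now takes an optional `UtxoLookup` (previously
	// `chain::Access`) used to verify that announced channels exist on-chain.
	let gossip_sync = P2PGossipSync::new(&graph, Some(utxo_source), logger);
	let _ = gossip_sync.network_graph();

	// A permanent `ChannelFailure` now removes the channel outright (via the new
	// `channel_failed_permanent`); non-permanent failures no longer touch the graph.
	graph.handle_network_update(&NetworkUpdate::ChannelFailure {
		short_channel_id: 42,
		is_permanent: true,
	});
}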
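// A second sketch in the same vein (also not part of the diff), reusing the imports
// above: the new `NodeId` conversions and the `NodeAnnouncementInfo::addresses()`
// accessor that replaces the removed `addresses` field. The hex string is the
// secp256k1 generator point, used purely as a placeholder value.

use core::convert::TryFrom;
use core::ops::Deref;
use core::str::FromStr;
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::NetAddress;
use lightning::routing::gossip::NodeId;

fn node_addresses<L: Deref>(graph: &NetworkGraph<L>, pubkey: &PublicKey) -> Vec<NetAddress>
where L::Target: Logger {
	// `From<PublicKey> for NodeId` and `FromStr` are new in this change.
	let node_id = NodeId::from(*pubkey);
	let parsed = NodeId::from_str(
		"0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798");
	debug_assert!(parsed.is_ok());

	// Addresses are no longer a dedicated field on `NodeAnnouncementInfo`; they are
	// read out of the cached announcement message via the new accessor (and are
	// empty if no signed announcement was stored).
	let graph_ro = graph.read_only();
	let addresses = graph_ro.nodes().get(&node_id)
		.and_then(|node| node.announcement_info.as_ref())
		.map(|info| info.addresses().to_vec())
		.unwrap_or_default();

	// The fallible conversion back to a `PublicKey` is also new.
	debug_assert!(PublicKey::try_from(node_id).is_ok());
	addresses
}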