X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fgossip.rs;h=950ec79a1e98e19e8d31da31624227ddca07832a;hb=e64342afab9c5aed1ed3592fcc2e2f3a38fac101;hp=dc02206db24efbece6f434832d0061c113e563bf;hpb=32d6e91fd6547241a04dc7827e11efe0ed7e5f4f;p=rust-lightning diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index dc02206d..950ec79a 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -9,6 +9,8 @@ //! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers +use bitcoin::blockdata::constants::ChainHash; + use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; use bitcoin::secp256k1::{PublicKey, Verification}; use bitcoin::secp256k1::Secp256k1; @@ -16,15 +18,12 @@ use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; -use bitcoin::hashes::hex::FromHex; -use bitcoin::hash_types::BlockHash; - use bitcoin::network::constants::Network; -use bitcoin::blockdata::constants::genesis_block; use crate::events::{MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::ChannelId; use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; -use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; +use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT}; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter}; use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use crate::ln::msgs; @@ -80,6 +79,11 @@ impl NodeId { &self.0 } + /// Get the public key as an array from this NodeId + pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] { + &self.0 + } + /// Get the public key from this NodeId pub fn as_pubkey(&self) -> Result { PublicKey::from_slice(&self.0) @@ -153,10 +157,10 @@ impl TryFrom for PublicKey { } impl FromStr for NodeId { - type Err = bitcoin::hashes::hex::Error; + type Err = hex::parse::HexToArrayError; fn from_str(s: &str) -> Result { - let data: [u8; PUBLIC_KEY_SIZE] = FromHex::from_hex(s)?; + let data: [u8; PUBLIC_KEY_SIZE] = hex::FromHex::from_hex(s)?; Ok(NodeId(data)) } } @@ -165,7 +169,7 @@ impl FromStr for NodeId { pub struct NetworkGraph where L::Target: Logger { secp_ctx: Secp256k1, last_rapid_gossip_sync_timestamp: Mutex>, - genesis_hash: BlockHash, + chain_hash: ChainHash, logger: L, // Lock order: channels -> nodes channels: RwLock>, @@ -254,7 +258,7 @@ pub struct P2PGossipSync>, U: Deref, L: Deref> where U::Target: UtxoLookup, L::Target: Logger { network_graph: G, - utxo_lookup: Option, + utxo_lookup: RwLock>, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize, pending_events: Mutex>, @@ -273,7 +277,7 @@ where U::Target: UtxoLookup, L::Target: Logger network_graph, #[cfg(feature = "std")] full_syncs_requested: AtomicUsize::new(0), - utxo_lookup, + utxo_lookup: RwLock::new(utxo_lookup), pending_events: Mutex::new(vec![]), logger, } @@ -282,8 +286,8 @@ where U::Target: UtxoLookup, L::Target: Logger /// Adds a provider used to check new announcements. Does not affect /// existing announcements unless they are updated. /// Add, update or remove the provider would replace the current one. 
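// A minimal usage sketch of the new `&self` signature below; `graph`, `logger`,
// `my_utxo_lookup`, and the `MyUtxoLookup` type are assumed bindings (a `NetworkGraph`,
// a `Logger`, and an implementor of `UtxoLookup`), not identifiers defined by this diff:
let gossip_sync = P2PGossipSync::new(&graph, None::<&MyUtxoLookup>, &logger);
// Since `add_utxo_lookup` now takes `&self` and keeps the lookup behind a `RwLock`, a
// chain source can be attached even after `gossip_sync` has been shared with the peer handler:
gossip_sync.add_utxo_lookup(Some(&my_utxo_lookup));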
- pub fn add_utxo_lookup(&mut self, utxo_lookup: Option) { - self.utxo_lookup = utxo_lookup; + pub fn add_utxo_lookup(&self, utxo_lookup: Option) { + *self.utxo_lookup.write().unwrap() = utxo_lookup; } /// Gets a reference to the underlying [`NetworkGraph`] which was provided in @@ -340,6 +344,9 @@ where U::Target: UtxoLookup, L::Target: Logger impl NetworkGraph where L::Target: Logger { /// Handles any network updates originating from [`Event`]s. + // + /// Note that this will skip applying any [`NetworkUpdate::ChannelUpdateMessage`] to avoid + /// leaking possibly identifying information of the sender to the public network. /// /// [`Event`]: crate::events::Event pub fn handle_network_update(&self, network_update: &NetworkUpdate) { @@ -348,8 +355,7 @@ impl NetworkGraph where L::Target: Logger { let short_channel_id = msg.contents.short_channel_id; let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1); let status = if is_enabled { "enabled" } else { "disabled" }; - log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}.", short_channel_id, status); - let _ = self.update_channel(msg); + log_debug!(self.logger, "Skipping application of a channel update from a payment failure. Channel {} is {}.", short_channel_id, status); }, NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { if is_permanent { @@ -367,9 +373,9 @@ impl NetworkGraph where L::Target: Logger { } } - /// Gets the genesis hash for this network graph. - pub fn get_genesis_hash(&self) -> BlockHash { - self.genesis_hash + /// Gets the chain hash for this network graph. + pub fn get_chain_hash(&self) -> ChainHash { + self.chain_hash } } @@ -382,7 +388,7 @@ macro_rules! secp_verify_sig { err: format!("Invalid signature on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid signature on {} message", $msg_type), }, log_level: Level::Trace, @@ -400,7 +406,7 @@ macro_rules! get_pubkey_from_node_id { err: format!("Invalid public key on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid public key on {} message", $msg_type), }, log_level: Level::Trace @@ -409,11 +415,17 @@ macro_rules! get_pubkey_from_node_id { } } +fn message_sha256d_hash(msg: &M) -> Sha256dHash { + let mut engine = Sha256dHash::engine(); + msg.write(&mut engine).expect("In-memory structs should not fail to serialize"); + Sha256dHash::from_engine(engine) +} + /// Verifies the signature of a [`NodeAnnouncement`]. /// /// Returns an error if it is invalid. pub fn verify_node_announcement(msg: &NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); Ok(()) @@ -423,7 +435,7 @@ pub fn verify_node_announcement(msg: &NodeAnnouncement, secp_ct /// /// Returns an error if one of the signatures is invalid. 
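// Sketch of standalone use of the two verifiers, e.g. for gossip pulled from an untrusted
// cache rather than the P2P network; `node_ann` and `chan_ann` are assumed to be
// previously deserialized `NodeAnnouncement` / `ChannelAnnouncement` values:
let secp_ctx = Secp256k1::verification_only();
verify_node_announcement(&node_ann, &secp_ctx).expect("valid node_announcement signature");
verify_channel_announcement(&chan_ann, &secp_ctx).expect("valid channel_announcement signatures");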
pub fn verify_channel_announcement(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); @@ -443,7 +455,7 @@ where U::Target: UtxoLookup, L::Target: Logger } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { - self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?; + self.network_graph.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } @@ -580,7 +592,7 @@ where U::Target: UtxoLookup, L::Target: Logger pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), msg: GossipTimestampFilter { - chain_hash: self.network_graph.genesis_hash, + chain_hash: self.network_graph.chain_hash, first_timestamp: gossip_start_time as u32, // 2106 issue! timestamp_range: u32::max_value(), }, @@ -619,7 +631,7 @@ where U::Target: UtxoLookup, L::Target: Logger let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0); // Per spec, we must reply to a query. Send an empty message when things are invalid. - if msg.chain_hash != self.network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { + if msg.chain_hash != self.network_graph.chain_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@ -858,31 +870,31 @@ impl ChannelInfo { /// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a /// returned `source`, or `None` if `target` is not one of the channel's counterparties. pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, source) = { + let (direction, source, outbound) = { if target == &self.node_one { - (self.two_to_one.as_ref(), &self.node_two) + (self.two_to_one.as_ref(), &self.node_two, false) } else if target == &self.node_two { - (self.one_to_two.as_ref(), &self.node_one) + (self.one_to_two.as_ref(), &self.node_one, true) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), source)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), source)) } /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a /// returned `target`, or `None` if `source` is not one of the channel's counterparties. 
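// Sketch: resolving a channel's per-direction data for pathfinding; `chan: &ChannelInfo`
// and `source_node: NodeId` are assumed bindings:
if let Some((directed, next_hop)) = chan.as_directed_from(&source_node) {
    // `directed` wraps the `ChannelUpdateInfo` advertised by `source_node` for this
    // direction, and `next_hop` is the counterparty that would receive the forwarded HTLC.
    let _available = directed.effective_capacity();
}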
pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { - let (direction, target) = { + let (direction, target, outbound) = { if source == &self.node_one { - (self.one_to_two.as_ref(), &self.node_two) + (self.one_to_two.as_ref(), &self.node_two, true) } else if source == &self.node_two { - (self.two_to_one.as_ref(), &self.node_one) + (self.two_to_one.as_ref(), &self.node_one, false) } else { return None; } }; - direction.map(|dir| (DirectedChannelInfo::new(self, dir), target)) + direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), target)) } /// Returns a [`ChannelUpdateInfo`] based on the direction implied by the channel_flag. @@ -978,51 +990,55 @@ impl Readable for ChannelInfo { pub struct DirectedChannelInfo<'a> { channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, - htlc_maximum_msat: u64, - effective_capacity: EffectiveCapacity, + /// The direction this channel is in - if set, it indicates that we're traversing the channel + /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`]. + from_node_one: bool, } impl<'a> DirectedChannelInfo<'a> { #[inline] - fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo) -> Self { - let mut htlc_maximum_msat = direction.htlc_maximum_msat; - let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); - - let effective_capacity = match capacity_msat { - Some(capacity_msat) => { - htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); - EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: htlc_maximum_msat } - }, - None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat }, - }; - - Self { - channel, direction, htlc_maximum_msat, effective_capacity - } + fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self { + Self { channel, direction, from_node_one } } /// Returns information for the channel. #[inline] pub fn channel(&self) -> &'a ChannelInfo { self.channel } - /// Returns the maximum HTLC amount allowed over the channel in the direction. - #[inline] - pub fn htlc_maximum_msat(&self) -> u64 { - self.htlc_maximum_msat - } - /// Returns the [`EffectiveCapacity`] of the channel in the direction. /// /// This is either the total capacity from the funding transaction, if known, or the /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known, /// otherwise. + #[inline] pub fn effective_capacity(&self) -> EffectiveCapacity { - self.effective_capacity + let mut htlc_maximum_msat = self.direction().htlc_maximum_msat; + let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); + + match capacity_msat { + Some(capacity_msat) => { + htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat); + EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } + }, + None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat }, + } } /// Returns information for the direction. #[inline] pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.direction } + + /// Returns the `node_id` of the source hop. + /// + /// Refers to the `node_id` forwarding the payment to the next hop. + #[inline] + pub fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } } + + /// Returns the `node_id` of the target hop. + /// + /// Refers to the `node_id` receiving the payment from the previous hop. 
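// Sketch of the two accessors, assuming `directed: DirectedChannelInfo` was obtained via
// `as_directed_from`: they resolve to `node_one`/`node_two` according to the traversal direction.
let forwarding_node: &NodeId = directed.source();
let receiving_node: &NodeId = directed.target();
assert!(forwarding_node != receiving_node); // self-channels are rejected at announcement time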
+ #[inline] + pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } } } impl<'a> fmt::Debug for DirectedChannelInfo<'a> { @@ -1127,7 +1143,7 @@ pub struct NodeAnnouncementInfo { impl NodeAnnouncementInfo { /// Internet-level addresses via which one can connect to the node - pub fn addresses(&self) -> &[NetAddress] { + pub fn addresses(&self) -> &[SocketAddress] { self.announcement_message.as_ref() .map(|msg| msg.contents.addresses.as_slice()) .unwrap_or_default() @@ -1136,7 +1152,7 @@ impl NodeAnnouncementInfo { impl Writeable for NodeAnnouncementInfo { fn write(&self, writer: &mut W) -> Result<(), io::Error> { - let empty_addresses = Vec::::new(); + let empty_addresses = Vec::::new(); write_tlv_fields!(writer, { (0, self.features, required), (2, self.last_update, required), @@ -1159,7 +1175,7 @@ impl Readable for NodeAnnouncementInfo { (8, announcement_message, option), (10, _addresses, optional_vec), // deprecated, not used anymore }); - let _: Option> = _addresses; + let _: Option> = _addresses; Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(), alias: alias.0.unwrap(), announcement_message }) } @@ -1169,7 +1185,7 @@ impl Readable for NodeAnnouncementInfo { /// /// Since node aliases are provided by third parties, they are a potential avenue for injection /// attacks. Care must be taken when processing. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub struct NodeAlias(pub [u8; 32]); impl fmt::Display for NodeAlias { @@ -1235,7 +1251,7 @@ impl Writeable for NodeInfo { } // A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is -// necessary to maintain compatibility with previous serializations of `NetAddress` that have an +// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an // invalid hostname set. We ignore and eat all errors until we are either able to read a // `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end. struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo); @@ -1281,7 +1297,7 @@ impl Writeable for NetworkGraph where L::Target: Logger { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); - self.genesis_hash.write(writer)?; + self.chain_hash.write(writer)?; let channels = self.channels.read().unwrap(); (channels.len() as u64).write(writer)?; for (ref chan_id, ref chan_info) in channels.unordered_iter() { @@ -1307,16 +1323,18 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - let genesis_hash: BlockHash = Readable::read(reader)?; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::new(); + // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. + let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; - let mut nodes = IndexedMap::new(); + // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. 
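// (Taking `cmp::min(count, cap)` keeps the pre-allocation bounded even if the serialized
// count field is corrupt or malicious; the caps are 1.5x the Nov 2023 figures quoted in
// these comments: 15_000 * 3 / 2 = 22_500 and 69_000 * 3 / 2 = 103_500.)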
+ let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); for _ in 0..nodes_count { let node_id = Readable::read(reader)?; let node_info = Readable::read(reader)?; @@ -1330,13 +1348,13 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { Ok(NetworkGraph { secp_ctx: Secp256k1::verification_only(), - genesis_hash, + chain_hash, logger, channels: RwLock::new(channels), nodes: RwLock::new(nodes), last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp), - removed_nodes: Mutex::new(HashMap::new()), - removed_channels: Mutex::new(HashMap::new()), + removed_nodes: Mutex::new(new_hash_map()), + removed_channels: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), }) } @@ -1366,7 +1384,7 @@ impl PartialEq for NetworkGraph where L::Target: Logger { let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) }; let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self()); let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self()); - self.genesis_hash.eq(&other.genesis_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b) + self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b) } } @@ -1375,13 +1393,13 @@ impl NetworkGraph where L::Target: Logger { pub fn new(network: Network, logger: L) -> NetworkGraph { Self { secp_ctx: Secp256k1::verification_only(), - genesis_hash: genesis_block(network).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(network), logger, channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), - removed_channels: Mutex::new(HashMap::new()), - removed_nodes: Mutex::new(HashMap::new()), + removed_channels: Mutex::new(new_hash_map()), + removed_nodes: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), } } @@ -1607,7 +1625,7 @@ impl NetworkGraph where L::Target: Logger { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } - if msg.chain_hash != self.genesis_hash { + if msg.chain_hash != self.chain_hash { return Err(LightningError { err: "Channel announcement chain hash does not match genesis hash".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug), @@ -1827,7 +1845,7 @@ impl NetworkGraph where L::Target: Logger { // NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal, // so we'll just set the removal time here to the current UNIX time on the very next invocation // of this function. - #[cfg(feature = "no-std")] + #[cfg(not(feature = "std"))] { let mut tracked_time = Some(current_time_unix); core::mem::swap(time, &mut tracked_time); @@ -1844,14 +1862,14 @@ impl NetworkGraph where L::Target: Logger { /// For an already known (from announcement) channel, update info about one of the directions /// of the channel. /// - /// You probably don't want to call this directly, instead relying on a P2PGossipSync's - /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s + /// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. 
/// /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(&msg.contents, Some(&msg), Some(&msg.signature)) + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false) } /// For an already known (from announcement) channel, update info about one of the directions @@ -1861,13 +1879,26 @@ impl NetworkGraph where L::Target: Logger { /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(msg, None, None) + self.update_channel_internal(msg, None, None, false) + } + + /// For an already known (from announcement) channel, verify the given [`ChannelUpdate`]. + /// + /// This checks whether the update currently is applicable by [`Self::update_channel`]. + /// + /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or + /// materially in the future will be rejected. + pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> { + self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true) } - fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>) -> Result<(), LightningError> { + fn update_channel_internal(&self, msg: &msgs::UnsignedChannelUpdate, + full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>, + only_verify: bool) -> Result<(), LightningError> + { let chan_enabled = msg.flags & (1 << 1) != (1 << 1); - if msg.chain_hash != self.genesis_hash { + if msg.chain_hash != self.chain_hash { return Err(LightningError { err: "Channel update chain hash does not match genesis hash".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug), @@ -1951,7 +1982,7 @@ impl NetworkGraph where L::Target: Logger { } } } - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if msg.flags & 1 == 1 { check_update_latest!(channel.two_to_one); if let Some(sig) = sig { @@ -1960,7 +1991,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.two_to_one = get_new_channel_info!(); + if !only_verify { + channel.two_to_one = get_new_channel_info!(); + } } else { check_update_latest!(channel.one_to_two); if let Some(sig) = sig { @@ -1969,7 +2002,9 @@ impl NetworkGraph where L::Target: Logger { action: ErrorAction::IgnoreAndLog(Level::Debug) })?, "channel_update"); } - channel.one_to_two = get_new_channel_info!(); + if !only_verify { + channel.one_to_two = get_new_channel_info!(); + } } } } @@ -2038,7 +2073,7 @@ impl ReadOnlyNetworkGraph<'_> { /// Get network addresses by node id. /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. 
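// Sketch, assuming `network_graph: NetworkGraph<L>` and a peer's `pubkey: PublicKey`:
let addresses = network_graph.read_only().get_addresses(&pubkey).unwrap_or_default();
for addr in addresses {
    // Each entry is now a `SocketAddress` (previously `NetAddress`), i.e. an announced
    // IPv4/IPv6/onion/hostname endpoint usable for establishing an outbound connection.
    let _ = addr;
}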
- pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { + pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { self.nodes.get(&NodeId::from_pubkey(&pubkey)) .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec())) } @@ -2066,13 +2101,11 @@ pub(crate) mod tests { use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; + use bitcoin::hashes::hex::FromHex; use bitcoin::network::constants::Network; - use bitcoin::blockdata::constants::genesis_block; - use bitcoin::blockdata::script::Script; + use bitcoin::blockdata::constants::ChainHash; + use bitcoin::blockdata::script::ScriptBuf; use bitcoin::blockdata::transaction::TxOut; - - use hex; - use bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; @@ -2101,7 +2134,7 @@ pub(crate) mod tests { fn request_full_sync_finite_times() { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); + let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(gossip_sync.should_request_full_sync(&node_id)); assert!(gossip_sync.should_request_full_sync(&node_id)); @@ -2139,7 +2172,7 @@ pub(crate) mod tests { let mut unsigned_announcement = UnsignedChannelAnnouncement { features: channelmanager::provided_channel_features(&UserConfig::default()), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, node_id_1: NodeId::from_pubkey(&node_id_1), node_id_2: NodeId::from_pubkey(&node_id_2), @@ -2158,7 +2191,7 @@ pub(crate) mod tests { } } - pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> Script { + pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> ScriptBuf { let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), @@ -2167,7 +2200,7 @@ pub(crate) mod tests { pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { let mut unsigned_channel_update = UnsignedChannelUpdate { - chain_hash: genesis_block(Network::Testnet).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, timestamp: 100, flags: 0, @@ -2215,7 +2248,7 @@ pub(crate) mod tests { Err(_) => panic!() }; - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); match gossip_sync.handle_node_announcement( &NodeAnnouncement { signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey), @@ -2372,7 +2405,7 @@ pub(crate) mod tests { // Test that channel announcements with the wrong chain hash are ignored (network graph is testnet, // announcement is mainnet). 
let incorrect_chain_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.chain_hash = genesis_block(Network::Bitcoin).header.block_hash(); + unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); }, node_1_privkey, node_2_privkey, &secp_ctx); match gossip_sync.handle_channel_announcement(&incorrect_chain_announcement) { Ok(_) => panic!(), @@ -2410,6 +2443,7 @@ pub(crate) mod tests { } let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); + network_graph.verify_channel_update(&valid_channel_update).unwrap(); match gossip_sync.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(res), _ => panic!(), @@ -2476,7 +2510,7 @@ pub(crate) mod tests { unsigned_channel_update.timestamp += 500; }, node_1_privkey, &secp_ctx); let zero_hash = Sha256dHash::hash(&[0; 32]); - let fake_msghash = hash_to_message!(&zero_hash); + let fake_msghash = hash_to_message!(zero_hash.as_byte_array()); invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey); match gossip_sync.handle_channel_update(&invalid_sig_channel_update) { Ok(_) => panic!(), @@ -2486,7 +2520,7 @@ pub(crate) mod tests { // Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel // update is mainet). let incorrect_chain_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.chain_hash = genesis_block(Network::Bitcoin).header.block_hash(); + unsigned_channel_update.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); }, node_1_privkey, &secp_ctx); match gossip_sync.handle_channel_update(&incorrect_chain_update) { @@ -2512,7 +2546,8 @@ pub(crate) mod tests { let short_channel_id; { - // Announce a channel we will update + // Check we won't apply an update via `handle_network_update` for privacy reasons, but + // can continue fine if we manually apply it. 
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; @@ -2523,10 +2558,11 @@ pub(crate) mod tests { assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage { - msg: valid_channel_update, + msg: valid_channel_update.clone(), }); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + network_graph.update_channel(&valid_channel_update).unwrap(); } // Non-permanent failure doesn't touch the channel at all @@ -2924,7 +2960,7 @@ pub(crate) mod tests { let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); // It should ignore if gossip_queries feature is not enabled { @@ -2961,7 +2997,7 @@ pub(crate) mod tests { let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); @@ -3013,13 +3049,13 @@ pub(crate) mod tests { &gossip_sync, &node_id_2, QueryChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, }, false, vec![ReplyChannelRange { - chain_hash: genesis_block(Network::Bitcoin).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(Network::Bitcoin), first_blocknum: 0, number_of_blocks: 0xffff_ffff, sync_complete: true, @@ -3254,7 +3290,7 @@ pub(crate) mod tests { let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(Network::Testnet); let result = gossip_sync.handle_query_short_channel_ids(&node_id, QueryShortChannelIds { chain_hash, @@ -3313,16 +3349,16 @@ pub(crate) mod tests { assert_eq!(chan_update_info, read_chan_update_info); // Check the serialization hasn't changed. - let legacy_chan_update_info_with_some: Vec = hex::decode("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_some: Vec = >::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap(); assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some); // Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself // or the ChannelUpdate enclosed with `last_update_message`. 
- let legacy_chan_update_info_with_some_and_fail_update: Vec = hex::decode("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); + let legacy_chan_update_info_with_some_and_fail_update: Vec = >::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_some_and_fail_update.as_slice()); assert!(read_chan_update_info_res.is_err()); - let legacy_chan_update_info_with_none: Vec = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); + let legacy_chan_update_info_with_none: Vec = >::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap(); let read_chan_update_info_res: Result = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice()); assert!(read_chan_update_info_res.is_err()); @@ -3364,18 +3400,18 @@ pub(crate) mod tests { assert_eq!(chan_info_some_updates, read_chan_info); // Check the serialization hasn't changed. - let legacy_chan_info_with_some: Vec = hex::decode("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_some: Vec = >::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); assert_eq!(encoded_chan_info, legacy_chan_info_with_some); // Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` / // `last_update_message` fields fail to decode due to missing htlc_maximum_msat. 
- let legacy_chan_info_with_some_and_fail_update = hex::decode("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap(); + let legacy_chan_info_with_some_and_fail_update = >::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap(); let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_some_and_fail_update.as_slice()).unwrap(); assert_eq!(read_chan_info.announcement_received_time, 87654); assert_eq!(read_chan_info.one_to_two, None); assert_eq!(read_chan_info.two_to_one, None); - let legacy_chan_info_with_none: Vec = hex::decode("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); + let legacy_chan_info_with_none: Vec = >::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap(); let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap(); assert_eq!(read_chan_info.announcement_received_time, 87654); assert_eq!(read_chan_info.one_to_two, None); @@ -3385,7 +3421,7 @@ pub(crate) mod tests { #[test] fn node_info_is_readable() { // 1. 
Check we can read a valid NodeAnnouncementInfo and fail on an invalid one - let announcement_message = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); + let announcement_message = >::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap(); let valid_node_ann_info = NodeAnnouncementInfo { features: channelmanager::provided_node_features(&UserConfig::default()), @@ -3401,7 +3437,7 @@ pub(crate) mod tests { assert_eq!(read_valid_node_ann_info, valid_node_ann_info); assert_eq!(read_valid_node_ann_info.addresses().len(), 1); - let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); + let encoded_invalid_node_ann_info = >::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap(); let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice()); assert!(read_invalid_node_ann_info_res.is_err()); @@ -3416,14 +3452,14 @@ pub(crate) mod tests { let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap(); assert_eq!(read_valid_node_info, valid_node_info); - let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); + let encoded_invalid_node_info_hex = >::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap(); let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap(); assert_eq!(read_invalid_node_info.announcement_info, None); } #[test] fn test_node_info_keeps_compatibility() { - let old_ann_info_with_addresses = hex::decode("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); + let old_ann_info_with_addresses = >::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice()) .expect("to be able to read an old NodeAnnouncementInfo with addresses"); // This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty