From: Elias Rohrer
Date: Tue, 24 Sep 2024 04:51:52 +0000 (+0900)
Subject: `rustfmt`: Run on `routing/gossip.rs`
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=22b82d36e20f9d6a8e380e1a9f13dc3b9ade7067;p=rust-lightning

`rustfmt`: Run on `routing/gossip.rs`
---

diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs
index 6cb4d4d64..805b50197 100644
--- a/lightning/src/routing/gossip.rs
+++ b/lightning/src/routing/gossip.rs
@@ -24,9 +24,16 @@ use bitcoin::network::Network;
 use crate::events::{MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use crate::ln::msgs;
-use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement};
-use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT};
-use crate::ln::msgs::{QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd};
+use crate::ln::msgs::{
+	ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement,
+};
+use crate::ln::msgs::{
+	DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress,
+	MAX_VALUE_MSAT,
+};
+use crate::ln::msgs::{
+	QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd,
+};
 use crate::ln::types::ChannelId;
 use crate::routing::utxo::{self, UtxoLookup, UtxoResolver};
 use crate::util::indexed_map::{Entry as IndexedMapEntry, IndexedMap};
@@ -169,7 +176,10 @@ impl FromStr for NodeId {
 }
 
 /// Represents the network as nodes and channels between them
-pub struct NetworkGraph<L: Deref> where L::Target: Logger {
+pub struct NetworkGraph<L: Deref>
+where
+	L::Target: Logger,
+{
 	secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
 	last_rapid_gossip_sync_timestamp: Mutex<Option<u32>>,
 	chain_hash: ChainHash,
@@ -232,7 +242,7 @@ pub enum NetworkUpdate {
 		/// Whether the node should be permanently removed from consideration or can be restored
 		/// when a new `channel_update` message is received.
 		is_permanent: bool,
-	}
+	},
 }
 
 impl Writeable for NetworkUpdate {
@@ -251,7 +261,7 @@ impl Writeable for NetworkUpdate {
 					(0, node_id, required),
 					(2, is_permanent, required),
 				});
-			}
+			},
 		}
 		Ok(())
 	}
@@ -270,7 +280,7 @@ impl MaybeReadable for NetworkUpdate {
 				});
 				Ok(Some(Self::ChannelFailure {
 					short_channel_id: msg.0.unwrap().contents.short_channel_id,
-					is_permanent: false
+					is_permanent: false,
 				}))
 			},
 			2 => {
@@ -292,7 +302,7 @@ impl MaybeReadable for NetworkUpdate {
 					node_id: node_id.0.unwrap(),
 					is_permanent: is_permanent.0.unwrap(),
 				}))
-			}
+			},
 			t if t % 2 == 0 => Err(DecodeError::UnknownRequiredFeature),
 			_ => Ok(None),
 		}
@@ -304,8 +314,10 @@ impl MaybeReadable for NetworkUpdate {
 /// This network graph is then used for routing payments.
 /// Provides interface to help with initial routing sync by
 /// serving historical announcements.
-pub struct P2PGossipSync<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
-where U::Target: UtxoLookup, L::Target: Logger
+pub struct P2PGossipSync<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
+where
+	U::Target: UtxoLookup,
+	L::Target: Logger,
 {
 	network_graph: G,
 	utxo_lookup: RwLock<Option<U>>,
@@ -314,8 +326,10 @@ where U::Target: UtxoLookup, L::Target: Logger
 	logger: L,
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> P2PGossipSync<G, U, L>
-where U::Target: UtxoLookup, L::Target: Logger
+impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> P2PGossipSync<G, U, L>
+where
+	U::Target: UtxoLookup,
+	L::Target: Logger,
 {
 	/// Creates a new tracker of the actual state of the network of channels and nodes,
 	/// assuming an existing [`NetworkGraph`].
@@ -364,20 +378,25 @@ where U::Target: UtxoLookup, L::Target: Logger
 	pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) {
 		match &mut ev {
 			MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
-				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { return; }
-				if update_msg.as_ref()
-					.map(|msg| msg.contents.excess_data.len()).unwrap_or(0) > MAX_EXCESS_BYTES_FOR_RELAY
+				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
+					return;
+				}
+				if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0)
+					> MAX_EXCESS_BYTES_FOR_RELAY
 				{
 					*update_msg = None;
 				}
 			},
 			MessageSendEvent::BroadcastChannelUpdate { msg } => {
-				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { return; }
+				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
+					return;
+				}
 			},
 			MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
-				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY ||
-					msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY ||
-					msg.contents.excess_data.len() + msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
+				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
+					|| msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
+					|| msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
+						> MAX_EXCESS_BYTES_FOR_RELAY
 				{
 					return;
 				}
@@ -388,7 +407,10 @@ where U::Target: UtxoLookup, L::Target: Logger
 	}
 }
 
-impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
+impl<L: Deref> NetworkGraph<L>
+where
+	L::Target: Logger,
+{
 	/// Handles any network updates originating from [`Event`]s.
 	///
 	/// [`Event`]: crate::events::Event
@@ -396,14 +418,21 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		match *network_update {
 			NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
 				if is_permanent {
-					log_debug!(self.logger, "Removing channel graph entry for {} due to a payment failure.", short_channel_id);
+					log_debug!(
+						self.logger,
+						"Removing channel graph entry for {} due to a payment failure.",
+						short_channel_id
+					);
 					self.channel_failed_permanent(short_channel_id);
 				}
 			},
 			NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
 				if is_permanent {
-					log_debug!(self.logger,
-						"Removed node graph entry for {} due to a payment failure.", log_pubkey!(node_id));
+					log_debug!(
+						self.logger,
+						"Removed node graph entry for {} due to a payment failure.",
+						log_pubkey!(node_id)
+					);
 					self.node_failed_permanent(node_id);
 				};
 			},
@@ -438,18 +467,17 @@ macro_rules! secp_verify_sig {
 
 macro_rules! get_pubkey_from_node_id {
 	( $node_id: expr, $msg_type: expr ) => {
-		PublicKey::from_slice($node_id.as_slice())
-			.map_err(|_| LightningError {
-				err: format!("Invalid public key on {} message", $msg_type),
-				action: ErrorAction::SendWarningMessage {
-					msg: msgs::WarningMessage {
-						channel_id: ChannelId::new_zero(),
-						data: format!("Invalid public key on {} message", $msg_type),
-					},
-					log_level: Level::Trace
-				}
-			})?
-	}
+		PublicKey::from_slice($node_id.as_slice()).map_err(|_| LightningError {
+			err: format!("Invalid public key on {} message", $msg_type),
+			action: ErrorAction::SendWarningMessage {
+				msg: msgs::WarningMessage {
+					channel_id: ChannelId::new_zero(),
+					data: format!("Invalid public key on {} message", $msg_type),
+				},
+				log_level: Level::Trace,
+			},
+		})?
+	};
 }
 
 fn message_sha256d_hash<M: Writeable>(msg: &M) -> Sha256dHash {
@@ -461,9 +489,17 @@ fn message_sha256d_hash<M: Writeable>(msg: &M) -> Sha256dHash {
 /// Verifies the signature of a [`NodeAnnouncement`].
 ///
 /// Returns an error if it is invalid.
-pub fn verify_node_announcement<C: Verification>(msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>) -> Result<(), LightningError> {
+pub fn verify_node_announcement<C: Verification>(
+	msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>,
+) -> Result<(), LightningError> {
 	let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
-	secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement");
+	secp_verify_sig!(
+		secp_ctx,
+		&msg_hash,
+		&msg.signature,
+		&get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"),
+		"node_announcement"
+	);
 	Ok(())
 }
 
@@ -471,37 +507,76 @@ pub fn verify_node_announcement<C: Verification>(msg: &NodeAnnouncement, secp_c
 /// Verifies all signatures included in a [`ChannelAnnouncement`].
 ///
 /// Returns an error if one of the signatures is invalid.
-pub fn verify_channel_announcement<C: Verification>(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>) -> Result<(), LightningError> {
+pub fn verify_channel_announcement<C: Verification>(
+	msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>,
+) -> Result<(), LightningError> {
 	let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
-	secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement");
-	secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement");
-	secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement");
-	secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement");
+	secp_verify_sig!(
+		secp_ctx,
+		&msg_hash,
+		&msg.node_signature_1,
+		&get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"),
+		"channel_announcement"
+	);
+	secp_verify_sig!(
+		secp_ctx,
+		&msg_hash,
+		&msg.node_signature_2,
+		&get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"),
+		"channel_announcement"
+	);
+	secp_verify_sig!(
+		secp_ctx,
+		&msg_hash,
+		&msg.bitcoin_signature_1,
+		&get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"),
+		"channel_announcement"
+	);
+	secp_verify_sig!(
+		secp_ctx,
+		&msg_hash,
+		&msg.bitcoin_signature_2,
+		&get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"),
+		"channel_announcement"
+	);
 	Ok(())
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync<G, U, L>
-where U::Target: UtxoLookup, L::Target: Logger
+impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler
+	for P2PGossipSync<G, U, L>
+where
+	U::Target: UtxoLookup,
+	L::Target: Logger,
 {
-	fn handle_node_announcement(&self, _their_node_id: Option<PublicKey>, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
+	fn handle_node_announcement(
+		&self, _their_node_id: Option<PublicKey>, msg: &msgs::NodeAnnouncement,
+	) -> Result<bool, LightningError> {
 		self.network_graph.update_node_from_announcement(msg)?;
-		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
-			msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
-			msg.contents.excess_data.len() + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
+		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+			&& msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+			&& msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
+				<= MAX_EXCESS_BYTES_FOR_RELAY)
 	}
 
-	fn handle_channel_announcement(&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
-		self.network_graph.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
+	fn handle_channel_announcement(
+		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelAnnouncement,
+	) -> Result<bool, LightningError> {
+		self.network_graph
+			.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
 		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
 	}
 
-	fn handle_channel_update(&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
+	fn handle_channel_update(
+		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelUpdate,
+	) -> Result<bool, LightningError> {
 		self.network_graph.update_channel(msg)?;
 		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
 	}
 
-	fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
+	fn get_next_channel_announcement(
+		&self, starting_point: u64,
+	) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
 		let mut channels = self.network_graph.channels.write().unwrap();
 		for (_, ref chan) in channels.range(starting_point..) {
 			if chan.announcement_message.is_some() {
@@ -523,13 +598,15 @@ where U::Target: UtxoLookup, L::Target: Logger
 		None
 	}
 
-	fn get_next_node_announcement(&self, starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> {
+	fn get_next_node_announcement(
+		&self, starting_point: Option<&NodeId>,
+	) -> Option<NodeAnnouncement> {
 		let mut nodes = self.network_graph.nodes.write().unwrap();
 		let iter = if let Some(node_id) = starting_point {
-				nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
-			} else {
-				nodes.range(..)
-			};
+			nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
+		} else {
+			nodes.range(..)
+		};
 		for (_, ref node) in iter {
 			if let Some(node_info) = node.announcement_info.as_ref() {
 				if let NodeAnnouncementInfo::Relayed(announcement) = node_info {
@@ -555,7 +632,9 @@ where U::Target: UtxoLookup, L::Target: Logger
 	/// [`query_channel_range`]: msgs::QueryChannelRange
 	/// [`query_scid`]: msgs::QueryShortChannelIds
 	/// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd
-	fn peer_connected(&self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> {
+	fn peer_connected(
+		&self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool,
+	) -> Result<(), ()> {
 		// We will only perform a sync with peers that support gossip_queries.
 		if !init_msg.features.supports_gossip_queries() {
 			// Don't disconnect peers for not supporting gossip queries. We may wish to have
@@ -619,7 +698,10 @@ where U::Target: UtxoLookup, L::Target: Logger
 		let should_sync = self.should_request_full_sync();
 		#[cfg(feature = "std")]
 		{
-			gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+			gossip_start_time = SystemTime::now()
+				.duration_since(UNIX_EPOCH)
+				.expect("Time must be > 1970")
+				.as_secs();
 			if should_sync {
 				gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
 			} else {
@@ -639,14 +721,18 @@ where U::Target: UtxoLookup, L::Target: Logger
 		Ok(())
 	}
 
-	fn handle_reply_channel_range(&self, _their_node_id: PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> {
+	fn handle_reply_channel_range(
+		&self, _their_node_id: PublicKey, _msg: ReplyChannelRange,
+	) -> Result<(), LightningError> {
 		// We don't make queries, so should never receive replies. If, in the future, the set
 		// reconciliation extensions to gossip queries become broadly supported, we should revert
 		// this code to its state pre-0.0.106.
 		Ok(())
 	}
 
-	fn handle_reply_short_channel_ids_end(&self, _their_node_id: PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
+	fn handle_reply_short_channel_ids_end(
+		&self, _their_node_id: PublicKey, _msg: ReplyShortChannelIdsEnd,
+	) -> Result<(), LightningError> {
 		// We don't make queries, so should never receive replies. If, in the future, the set
 		// reconciliation extensions to gossip queries become broadly supported, we should revert
 		// this code to its state pre-0.0.106.
 		Ok(())
 	}
 
@@ -660,17 +746,30 @@ where U::Target: UtxoLookup, L::Target: Logger
 	/// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
 	/// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
 	/// memory constrained systems.
-	fn handle_query_channel_range(&self, their_node_id: PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> {
-		log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks);
+	fn handle_query_channel_range(
+		&self, their_node_id: PublicKey, msg: QueryChannelRange,
+	) -> Result<(), LightningError> {
+		log_debug!(
+			self.logger,
+			"Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}",
+			log_pubkey!(their_node_id),
+			msg.first_blocknum,
+			msg.number_of_blocks
+		);
 
 		let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
 
 		// We might receive valid queries with end_blocknum that would overflow SCID conversion.
 		// If so, we manually cap the ending block to avoid this overflow.
-		let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
+		let exclusive_end_scid =
+			scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
 
 		// Per spec, we must reply to a query. Send an empty message when things are invalid.
-		if msg.chain_hash != self.network_graph.chain_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 {
+		if msg.chain_hash != self.network_graph.chain_hash
+			|| inclusive_start_scid.is_err()
+			|| exclusive_end_scid.is_err()
+			|| msg.number_of_blocks == 0
+		{
 			let mut pending_events = self.pending_events.lock().unwrap();
 			pending_events.push(MessageSendEvent::SendReplyChannelRange {
 				node_id: their_node_id.clone(),
@@ -680,7 +779,7 @@ where U::Target: UtxoLookup, L::Target: Logger
 					number_of_blocks: msg.number_of_blocks,
 					sync_complete: true,
 					short_channel_ids: vec![],
-				}
+				},
 			});
 			return Err(LightningError {
 				err: String::from("query_channel_range could not be processed"),
@@ -693,7 +792,9 @@ where U::Target: UtxoLookup, L::Target: Logger
 		// exists even if it's not yet routable.
 		let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
 		let mut channels = self.network_graph.channels.write().unwrap();
-		for (_, ref chan) in channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
+		for (_, ref chan) in
+			channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap())
+		{
 			if let Some(chan_announcement) = &chan.announcement_message {
 				// Construct a new batch if last one is full
 				if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
@@ -731,7 +832,7 @@ where U::Target: UtxoLookup, L::Target: Logger
 			//
 			// Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and
 			// first_blocknum will be either msg.first_blocknum or a higher block height.
-			let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+			let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
 				(true, msg.end_blocknum() - first_blocknum)
 			}
 			// Prior replies should use the number of blocks that fit into the reply. Overflow
@@ -750,14 +851,16 @@ where U::Target: UtxoLookup, L::Target: Logger
 					number_of_blocks,
 					sync_complete,
 					short_channel_ids: batch,
-				}
+				},
 			});
 		}
 
 		Ok(())
 	}
 
-	fn handle_query_short_channel_ids(&self, _their_node_id: PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> {
+	fn handle_query_short_channel_ids(
+		&self, _their_node_id: PublicKey, _msg: QueryShortChannelIds,
+	) -> Result<(), LightningError> {
 		// TODO
 		Err(LightningError {
 			err: String::from("Not implemented"),
@@ -782,7 +885,8 @@ where U::Target: UtxoLookup, L::Target: Logger
 	}
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> MessageSendEventsProvider for P2PGossipSync<G, U, L>
+impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> MessageSendEventsProvider
+	for P2PGossipSync<G, U, L>
 where
 	U::Target: UtxoLookup,
 	L::Target: Logger,
@@ -830,7 +934,15 @@ pub struct ChannelUpdateInfo {
 
 impl fmt::Display for ChannelUpdateInfo {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		write!(f, "last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}", self.last_update, self.enabled, self.cltv_expiry_delta, self.htlc_minimum_msat, self.fees)?;
+		write!(
+			f,
+			"last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}",
+			self.last_update,
+			self.enabled,
+			self.cltv_expiry_delta,
+			self.htlc_minimum_msat,
+			self.fees
+		)?;
 		Ok(())
 	}
 }
@@ -941,14 +1053,14 @@ pub struct ChannelInfo {
 
 impl PartialEq for ChannelInfo {
 	fn eq(&self, o: &ChannelInfo) -> bool {
-		self.features == o.features &&
-			self.node_one == o.node_one &&
-			self.one_to_two == o.one_to_two &&
-			self.node_two == o.node_two &&
-			self.two_to_one == o.two_to_one &&
-			self.capacity_sats == o.capacity_sats &&
-			self.announcement_message == o.announcement_message &&
-			self.announcement_received_time == o.announcement_received_time
+		self.features == o.features
+			&& self.node_one == o.node_one
+			&& self.one_to_two == o.one_to_two
+			&& self.node_two == o.node_two
+			&& self.two_to_one == o.two_to_one
+			&& self.capacity_sats == o.capacity_sats
+			&& self.announcement_message == o.announcement_message
+			&& self.announcement_received_time == o.announcement_received_time
 	}
 }
 
@@ -956,7 +1068,9 @@ impl ChannelInfo {
 	/// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a
 	/// returned `source`, or `None` if `target` is not one of the channel's counterparties.
 	pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
-		if self.one_to_two.is_none() || self.two_to_one.is_none() { return None; }
+		if self.one_to_two.is_none() || self.two_to_one.is_none() {
+			return None;
+		}
 		let (direction, source, outbound) = {
 			if target == &self.node_one {
 				(self.two_to_one.as_ref(), &self.node_two, false)
@@ -973,7 +1087,9 @@ impl ChannelInfo {
 	/// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a
 	/// returned `target`, or `None` if `source` is not one of the channel's counterparties.
 	pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
-		if self.one_to_two.is_none() || self.two_to_one.is_none() { return None; }
+		if self.one_to_two.is_none() || self.two_to_one.is_none() {
+			return None;
+		}
 		let (direction, target, outbound) = {
 			if source == &self.node_one {
 				(self.one_to_two.as_ref(), &self.node_two, true)
@@ -1000,8 +1116,15 @@ impl ChannelInfo {
 
 impl fmt::Display for ChannelInfo {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}",
-			log_bytes!(self.features.encode()), &self.node_one, self.one_to_two, &self.node_two, self.two_to_one)?;
+		write!(
+			f,
+			"features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}",
+			log_bytes!(self.features.encode()),
+			&self.node_one,
+			self.one_to_two,
+			&self.node_two,
+			self.two_to_one
+		)?;
 		Ok(())
 	}
 }
@@ -1069,7 +1192,10 @@ impl Readable for ChannelInfo {
 			two_to_one: two_to_one_wrap.map(|w| w.0).unwrap_or(None),
 			capacity_sats: _init_tlv_based_struct_field!(capacity_sats, required),
 			announcement_message: _init_tlv_based_struct_field!(announcement_message, required),
-			announcement_received_time: _init_tlv_based_struct_field!(announcement_received_time, (default_value, 0)),
+			announcement_received_time: _init_tlv_based_struct_field!(
+				announcement_received_time,
+				(default_value, 0)
+			),
 			node_one_counter: u32::max_value(),
 			node_two_counter: u32::max_value(),
 		})
@@ -1091,7 +1217,9 @@ pub struct DirectedChannelInfo<'a> {
 
 impl<'a> DirectedChannelInfo<'a> {
 	#[inline]
-	fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self {
+	fn new(
+		channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool,
+	) -> Self {
 		let (source_counter, target_counter) = if from_node_one {
 			(channel.node_one_counter, channel.node_two_counter)
 		} else {
@@ -1102,7 +1230,9 @@ impl<'a> DirectedChannelInfo<'a> {
 
 	/// Returns information for the channel.
 	#[inline]
-	pub fn channel(&self) -> &'a ChannelInfo { self.channel }
+	pub fn channel(&self) -> &'a ChannelInfo {
+		self.channel
+	}
 
 	/// Returns the [`EffectiveCapacity`] of the channel in the direction.
 	///
@@ -1125,34 +1255,50 @@ impl<'a> DirectedChannelInfo<'a> {
 
 	/// Returns information for the direction.
 	#[inline]
-	pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.direction }
+	pub(super) fn direction(&self) -> &'a ChannelUpdateInfo {
+		self.direction
+	}
 
 	/// Returns the `node_id` of the source hop.
 	///
 	/// Refers to the `node_id` forwarding the payment to the next hop.
 	#[inline]
-	pub fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
+	pub fn source(&self) -> &'a NodeId {
+		if self.from_node_one {
+			&self.channel.node_one
+		} else {
+			&self.channel.node_two
+		}
+	}
 
 	/// Returns the `node_id` of the target hop.
 	///
 	/// Refers to the `node_id` receiving the payment from the previous hop.
 	#[inline]
-	pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
+	pub fn target(&self) -> &'a NodeId {
+		if self.from_node_one {
+			&self.channel.node_two
+		} else {
+			&self.channel.node_one
+		}
+	}
 
 	/// Returns the source node's counter
 	#[inline(always)]
-	pub(super) fn source_counter(&self) -> u32 { self.source_counter }
+	pub(super) fn source_counter(&self) -> u32 {
+		self.source_counter
+	}
 
 	/// Returns the target node's counter
 	#[inline(always)]
-	pub(super) fn target_counter(&self) -> u32 { self.target_counter }
+	pub(super) fn target_counter(&self) -> u32 {
+		self.target_counter
+	}
 }
 
 impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		f.debug_struct("DirectedChannelInfo")
-			.field("channel", &self.channel)
-			.finish()
+		f.debug_struct("DirectedChannelInfo").field("channel", &self.channel).finish()
	}
 }
 
@@ -1179,7 +1325,7 @@ pub enum EffectiveCapacity {
 		/// The funding amount denominated in millisatoshi.
 		capacity_msat: u64,
 		/// The maximum HTLC amount denominated in millisatoshi.
-		htlc_maximum_msat: u64
+		htlc_maximum_msat: u64,
 	},
 	/// A capacity sufficient to route any payment, typically used for private channels provided by
 	/// an invoice.
@@ -1252,16 +1398,11 @@ pub enum NodeAnnouncementInfo {
 }
 
 impl NodeAnnouncementInfo {
-
 	/// Protocol features the node announced support for
 	pub fn features(&self) -> &NodeFeatures {
 		match self {
-			NodeAnnouncementInfo::Relayed(relayed) => {
-				&relayed.contents.features
-			}
-			NodeAnnouncementInfo::Local(local) => {
-				&local.features
-			}
+			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.features,
+			NodeAnnouncementInfo::Local(local) => &local.features,
 		}
 	}
 
@@ -1270,24 +1411,16 @@ impl NodeAnnouncementInfo {
 	/// Value may or may not be a timestamp, depending on the policy of the origin node.
 	pub fn last_update(&self) -> u32 {
 		match self {
-			NodeAnnouncementInfo::Relayed(relayed) => {
-				relayed.contents.timestamp
-			}
-			NodeAnnouncementInfo::Local(local) => {
-				local.last_update
-			}
+			NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.timestamp,
+			NodeAnnouncementInfo::Local(local) => local.last_update,
 		}
 	}
 
 	/// Color assigned to the node
 	pub fn rgb(&self) -> [u8; 3] {
 		match self {
-			NodeAnnouncementInfo::Relayed(relayed) => {
-				relayed.contents.rgb
-			}
-			NodeAnnouncementInfo::Local(local) => {
-				local.rgb
-			}
+			NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.rgb,
+			NodeAnnouncementInfo::Local(local) => local.rgb,
 		}
 	}
 
@@ -1296,24 +1429,16 @@ impl NodeAnnouncementInfo {
 	/// May be invalid or malicious (eg control chars), should not be exposed to the user.
 	pub fn alias(&self) -> &NodeAlias {
 		match self {
-			NodeAnnouncementInfo::Relayed(relayed) => {
-				&relayed.contents.alias
-			}
-			NodeAnnouncementInfo::Local(local) => {
-				&local.alias
-			}
+			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.alias,
+			NodeAnnouncementInfo::Local(local) => &local.alias,
 		}
 	}
 
 	/// Internet-level addresses via which one can connect to the node
 	pub fn addresses(&self) -> &[SocketAddress] {
 		match self {
-			NodeAnnouncementInfo::Relayed(relayed) => {
-				&relayed.contents.addresses
-			}
-			NodeAnnouncementInfo::Local(local) => {
-				&local.addresses
-			}
+			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.addresses,
+			NodeAnnouncementInfo::Local(local) => &local.addresses,
 		}
 	}
 
@@ -1322,12 +1447,8 @@ impl NodeAnnouncementInfo {
 	/// Not stored if contains excess data to prevent DoS.
 	pub fn announcement_message(&self) -> Option<&NodeAnnouncement> {
 		match self {
-			NodeAnnouncementInfo::Relayed(announcement) => {
-				Some(announcement)
-			}
-			NodeAnnouncementInfo::Local(_) => {
-				None
-			}
+			NodeAnnouncementInfo::Relayed(announcement) => Some(announcement),
+			NodeAnnouncementInfo::Local(_) => None,
 		}
 	}
 }
@@ -1453,8 +1574,12 @@ impl NodeInfo {
 
 impl fmt::Display for NodeInfo {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		write!(f, " channels: {:?}, announcement_info: {:?}",
-			&self.channels[..], self.announcement_info)?;
+		write!(
+			f,
+			" channels: {:?}, announcement_info: {:?}",
+			&self.channels[..],
+			self.announcement_info
+		)?;
 		Ok(())
 	}
 }
@@ -1482,7 +1607,7 @@ impl MaybeReadable for NodeAnnouncementInfoDeserWrapper {
 			Ok(node_announcement_info) => return Ok(Some(Self(node_announcement_info))),
 			Err(_) => {
 				copy(reader, &mut sink()).unwrap();
-				return Ok(None)
+				return Ok(None);
 			},
 		};
 	}
@@ -1501,7 +1626,8 @@ impl Readable for NodeInfo {
 			(4, channels, required_vec),
 		});
 		let _: Option<RoutingFees> = _lowest_inbound_channel_fees;
-		let announcement_info_wrap: Option<NodeAnnouncementInfoDeserWrapper> = announcement_info_wrap;
+		let announcement_info_wrap: Option<NodeAnnouncementInfoDeserWrapper> =
+			announcement_info_wrap;
 
 		Ok(NodeInfo {
 			announcement_info: announcement_info_wrap.map(|w| w.0),
@@ -1514,7 +1640,10 @@ impl Readable for NodeInfo {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
-impl<L: Deref> Writeable for NetworkGraph<L> where L::Target: Logger {
+impl<L: Deref> Writeable for NetworkGraph<L>
+where
+	L::Target: Logger,
+{
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		self.test_node_counter_consistency();
 
@@ -1542,7 +1671,10 @@ impl<L: Deref> Writeable for NetworkGraph<L> where L::Target: Logger {
 	}
 }
 
-impl<L: Deref> ReadableArgs<L> for NetworkGraph<L> where L::Target: Logger {
+impl<L: Deref> ReadableArgs<L> for NetworkGraph<L>
+where
+	L::Target: Logger,
+{
 	fn read<R: io::Read>(reader: &mut R, logger: L) -> Result<NetworkGraph<L>, DecodeError> {
 		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
@@ -1558,7 +1690,9 @@ impl<L: Deref> ReadableArgs<L> for NetworkGraph<L> where L::Target: Logger {
 		let nodes_count: u64 = Readable::read(reader)?;
 		// There shouldn't be anywhere near `u32::MAX` nodes, and we need some headroom to insert
 		// new nodes during sync, so reject any graphs claiming more than `u32::MAX / 2` nodes.
-		if nodes_count > u32::max_value() as u64 / 2 { return Err(DecodeError::InvalidValue); }
+		if nodes_count > u32::max_value() as u64 / 2 {
+			return Err(DecodeError::InvalidValue);
+		}
 		// In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that.
 		let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500));
 		for i in 0..nodes_count {
@@ -1596,7 +1730,10 @@ impl<L: Deref> ReadableArgs<L> for NetworkGraph<L> where L::Target: Logger {
 	}
 }
 
-impl<L: Deref> fmt::Display for NetworkGraph<L> where L::Target: Logger {
+impl<L: Deref> fmt::Display for NetworkGraph<L>
+where
+	L::Target: Logger,
+{
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
 		writeln!(f, "Network map\n[Channels]")?;
 		for (key, val) in self.channels.read().unwrap().unordered_iter() {
@@ -1611,20 +1748,32 @@ impl<L: Deref> fmt::Display for NetworkGraph<L> where L::Target: Logger {
 }
 
 impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
-impl<L: Deref> PartialEq for NetworkGraph<L> where L::Target: Logger {
+impl<L: Deref> PartialEq for NetworkGraph<L>
+where
+	L::Target: Logger,
+{
 	fn eq(&self, other: &Self) -> bool {
 		// For a total lockorder, sort by position in memory and take the inner locks in that order.
 		// (Assumes that we can't move within memory while a lock is held).
 		let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
 		let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) };
 		let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) };
-		let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self());
-		let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self());
+		let (channels_a, channels_b) = (
+			a.0.unsafe_well_ordered_double_lock_self(),
+			b.0.unsafe_well_ordered_double_lock_self(),
+		);
+		let (nodes_a, nodes_b) = (
+			a.1.unsafe_well_ordered_double_lock_self(),
+			b.1.unsafe_well_ordered_double_lock_self(),
+		);
 		self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b)
 	}
 }
 
-impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
+impl<L: Deref> NetworkGraph<L>
+where
+	L::Target: Logger,
+{
 	/// Creates a new, empty, network graph.
 	pub fn new(network: Network, logger: L) -> NetworkGraph<L> {
 		Self {
@@ -1643,7 +1792,8 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	}
 
 	fn test_node_counter_consistency(&self) {
-		#[cfg(debug_assertions)] {
+		#[cfg(debug_assertions)]
+		{
 			let channels = self.channels.read().unwrap();
 			let nodes = self.nodes.read().unwrap();
 			let removed_node_counters = self.removed_node_counters.lock().unwrap();
@@ -1688,7 +1838,8 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		ReadOnlyNetworkGraph {
 			channels,
 			nodes,
-			max_node_counter: (self.next_node_counter.load(Ordering::Acquire) as u32).saturating_sub(1),
+			max_node_counter: (self.next_node_counter.load(Ordering::Acquire) as u32)
+				.saturating_sub(1),
 		}
 	}
 
@@ -1701,7 +1852,10 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// Update the unix timestamp provided by the most recent rapid gossip sync.
 	/// This should be done automatically by the rapid sync process after every sync completion.
 	pub fn set_last_rapid_gossip_sync_timestamp(&self, last_rapid_gossip_sync_timestamp: u32) {
-		self.last_rapid_gossip_sync_timestamp.lock().unwrap().replace(last_rapid_gossip_sync_timestamp);
+		self.last_rapid_gossip_sync_timestamp
+			.lock()
+			.unwrap()
+			.replace(last_rapid_gossip_sync_timestamp);
 	}
 
 	/// Clears the `NodeAnnouncementInfo` field for all nodes in the `NetworkGraph` for testing
@@ -1719,13 +1873,18 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// You probably don't want to call this directly, instead relying on a P2PGossipSync's
 	/// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept
 	/// routing messages from a source using a protocol other than the lightning P2P protocol.
-	pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<(), LightningError> {
+	pub fn update_node_from_announcement(
+		&self, msg: &msgs::NodeAnnouncement,
+	) -> Result<(), LightningError> {
 		// First check if we have the announcement already to avoid the CPU cost of validating a
 		// redundant announcement.
 		if let Some(node) = self.nodes.read().unwrap().get(&msg.contents.node_id) {
 			if let Some(node_info) = node.announcement_info.as_ref() {
-				if node_info.last_update()  == msg.contents.timestamp {
-					return Err(LightningError{err: "Update had the same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
+				if node_info.last_update() == msg.contents.timestamp {
+					return Err(LightningError {
+						err: "Update had the same timestamp as last processed update".to_owned(),
+						action: ErrorAction::IgnoreDuplicateGossip,
+					});
 				}
 			}
 		}
@@ -1737,49 +1896,64 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// given node announcement without verifying the associated signatures. Because we aren't
 	/// given the associated signatures here we cannot relay the node announcement to any of our
 	/// peers.
-	pub fn update_node_from_unsigned_announcement(&self, msg: &msgs::UnsignedNodeAnnouncement) -> Result<(), LightningError> {
+	pub fn update_node_from_unsigned_announcement(
+		&self, msg: &msgs::UnsignedNodeAnnouncement,
+	) -> Result<(), LightningError> {
 		self.update_node_from_announcement_intern(msg, None)
 	}
 
-	fn update_node_from_announcement_intern(&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> {
+	fn update_node_from_announcement_intern(
+		&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>,
+	) -> Result<(), LightningError> {
 		let mut nodes = self.nodes.write().unwrap();
 		match nodes.get_mut(&msg.node_id) {
 			None => {
 				core::mem::drop(nodes);
 				self.pending_checks.check_hold_pending_node_announcement(msg, full_msg)?;
-				Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError})
+				Err(LightningError {
+					err: "No existing channels for node_announcement".to_owned(),
+					action: ErrorAction::IgnoreError,
+				})
			},
 			Some(node) => {
 				if let Some(node_info) = node.announcement_info.as_ref() {
 					// The timestamp field is somewhat of a misnomer - the BOLTs use it to order
 					// updates to ensure you always have the latest one, only vaguely suggesting
 					// that it be at least the current time.
-					if node_info.last_update() > msg.timestamp {
-						return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
-					} else if node_info.last_update() == msg.timestamp {
-						return Err(LightningError{err: "Update had the same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
+					if node_info.last_update() > msg.timestamp {
+						return Err(LightningError {
+							err: "Update older than last processed update".to_owned(),
+							action: ErrorAction::IgnoreDuplicateGossip,
+						});
+					} else if node_info.last_update() == msg.timestamp {
+						return Err(LightningError {
+							err: "Update had the same timestamp as last processed update"
+								.to_owned(),
+							action: ErrorAction::IgnoreDuplicateGossip,
+						});
 					}
 				}
 
-				let should_relay =
-					msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
-					msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
-					msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY;
+				let should_relay = msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+					&& msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+					&& msg.excess_data.len() + msg.excess_address_data.len()
+						<= MAX_EXCESS_BYTES_FOR_RELAY;
 
-				node.announcement_info = if let (Some(signed_announcement), true) = (full_msg, should_relay) {
-					Some(NodeAnnouncementInfo::Relayed(signed_announcement.clone()))
-				} else {
-					Some(NodeAnnouncementInfo::Local(NodeAnnouncementDetails {
-						features: msg.features.clone(),
-						last_update: msg.timestamp,
-						rgb: msg.rgb,
-						alias: msg.alias,
-						addresses: msg.addresses.clone(),
-					}))
-				};
+				node.announcement_info =
+					if let (Some(signed_announcement), true) = (full_msg, should_relay) {
+						Some(NodeAnnouncementInfo::Relayed(signed_announcement.clone()))
+					} else {
+						Some(NodeAnnouncementInfo::Local(NodeAnnouncementDetails {
+							features: msg.features.clone(),
+							last_update: msg.timestamp,
+							rgb: msg.rgb,
+							alias: msg.alias,
+							addresses: msg.addresses.clone(),
+						}))
+					};
 				Ok(())
-			}
+			},
 		}
 	}
 
@@ -1810,7 +1984,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	///
 	/// This will skip verification of if the channel is actually on-chain.
 	pub fn update_channel_from_announcement_no_lookup(
-		&self, msg: &ChannelAnnouncement
+		&self, msg: &ChannelAnnouncement,
 	) -> Result<(), LightningError> {
 		self.update_channel_from_announcement::<&UtxoResolver>(msg, &None)
 	}
@@ -1822,7 +1996,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
 	/// the corresponding UTXO exists on chain and is correctly-formatted.
 	pub fn update_channel_from_unsigned_announcement<U: Deref>(
-		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>
+		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
 	) -> Result<(), LightningError>
 	where
 		U::Target: UtxoLookup,
@@ -1837,9 +2011,15 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// rapid gossip sync server)
 	///
 	/// All other parameters as used in [`msgs::UnsignedChannelAnnouncement`] fields.
-	pub fn add_channel_from_partial_announcement(&self, short_channel_id: u64, timestamp: u64, features: ChannelFeatures, node_id_1: PublicKey, node_id_2: PublicKey) -> Result<(), LightningError> {
+	pub fn add_channel_from_partial_announcement(
+		&self, short_channel_id: u64, timestamp: u64, features: ChannelFeatures,
+		node_id_1: PublicKey, node_id_2: PublicKey,
+	) -> Result<(), LightningError> {
 		if node_id_1 == node_id_2 {
-			return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
+			return Err(LightningError {
+				err: "Channel announcement node had a channel with itself".to_owned(),
+				action: ErrorAction::IgnoreError,
+			});
 		};
 
 		let node_1 = NodeId::from_pubkey(&node_id_1);
@@ -1860,14 +2040,22 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		self.add_channel_between_nodes(short_channel_id, channel_info, None)
 	}
 
-	fn add_channel_between_nodes(&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option<Amount>) -> Result<(), LightningError> {
+	fn add_channel_between_nodes(
+		&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option<Amount>,
+	) -> Result<(), LightningError> {
 		let mut channels = self.channels.write().unwrap();
 		let mut nodes = self.nodes.write().unwrap();
 
 		let node_id_a = channel_info.node_one.clone();
 		let node_id_b = channel_info.node_two.clone();
 
-		log_gossip!(self.logger, "Adding channel {} between nodes {} and {}", short_channel_id, node_id_a, node_id_b);
+		log_gossip!(
+			self.logger,
+			"Adding channel {} between nodes {} and {}",
+			short_channel_id,
+			node_id_a,
+			node_id_b
+		);
 
 		let channel_info = match channels.entry(short_channel_id) {
 			IndexedMapEntry::Occupied(mut entry) => {
@@ -1887,17 +2075,18 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 					*entry.get_mut() = channel_info;
 					entry.into_mut()
 				} else {
-					return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
+					return Err(LightningError {
+						err: "Already have knowledge of channel".to_owned(),
+						action: ErrorAction::IgnoreDuplicateGossip,
+					});
 				}
 			},
-			IndexedMapEntry::Vacant(entry) => {
-				entry.insert(channel_info)
-			}
+			IndexedMapEntry::Vacant(entry) => entry.insert(channel_info),
 		};
 
 		let mut node_counter_id = [
 			(&mut channel_info.node_one_counter, node_id_a),
-			(&mut channel_info.node_two_counter, node_id_b)
+			(&mut channel_info.node_two_counter, node_id_b),
 		];
 		for (chan_info_node_counter, current_node_id) in node_counter_id.iter_mut() {
 			match nodes.entry(current_node_id.clone()) {
@@ -1908,16 +2097,17 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 				},
 				IndexedMapEntry::Vacant(node_entry) => {
 					let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
-					**chan_info_node_counter = removed_node_counters.pop()
+					**chan_info_node_counter = removed_node_counters
+						.pop()
 						.unwrap_or(self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32);
 					node_entry.insert(NodeInfo {
-						channels: vec!(short_channel_id),
+						channels: vec![short_channel_id],
 						announcement_info: None,
 						node_counter: **chan_info_node_counter,
 					});
-				}
+				},
 			};
-		};
+		}
 		Ok(())
 	}
 
@@ -1929,7 +2119,10 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// return an `Ok(())`.
 	fn pre_channel_announcement_validation_check<U: Deref>(
 		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
-	) -> Result<(), LightningError> where U::Target: UtxoLookup {
+	) -> Result<(), LightningError>
+	where
+		U::Target: UtxoLookup,
+	{
 		let channels = self.channels.read().unwrap();
 
 		if let Some(chan) = channels.get(&msg.short_channel_id) {
@@ -1948,7 +2141,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 			if msg.node_id_1 == chan.node_one && msg.node_id_2 == chan.node_two {
 				return Err(LightningError {
 					err: "Already have chain-validated channel".to_owned(),
-					action: ErrorAction::IgnoreDuplicateGossip
+					action: ErrorAction::IgnoreDuplicateGossip,
 				});
 			}
 		} else if utxo_lookup.is_none() {
@@ -1956,7 +2149,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 			// duplicate announcement without bothering to take the channels write lock.
 			return Err(LightningError {
 				err: "Already have non-chain-validated channel".to_owned(),
-				action: ErrorAction::IgnoreDuplicateGossip
+				action: ErrorAction::IgnoreDuplicateGossip,
 			});
 		}
 
@@ -1969,13 +2162,17 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// Generally [`Self::pre_channel_announcement_validation_check`] should have been called
 	/// first.
 	fn update_channel_from_unsigned_announcement_intern<U: Deref>(
-		&self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option<U>
+		&self, msg: &msgs::UnsignedChannelAnnouncement,
+		full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option<U>,
 	) -> Result<(), LightningError>
 	where
 		U::Target: UtxoLookup,
 	{
 		if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 {
-			return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
+			return Err(LightningError {
+				err: "Channel announcement node had a channel with itself".to_owned(),
+				action: ErrorAction::IgnoreError,
+			});
 		}
 
 		if msg.chain_hash != self.chain_hash {
@@ -1988,23 +2185,27 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		{
 			let removed_channels = self.removed_channels.lock().unwrap();
 			let removed_nodes = self.removed_nodes.lock().unwrap();
-			if removed_channels.contains_key(&msg.short_channel_id) ||
-				removed_nodes.contains_key(&msg.node_id_1) ||
-				removed_nodes.contains_key(&msg.node_id_2) {
+			if removed_channels.contains_key(&msg.short_channel_id)
+				|| removed_nodes.contains_key(&msg.node_id_1)
+				|| removed_nodes.contains_key(&msg.node_id_2)
+			{
 				return Err(LightningError{
 					err: format!("Channel with SCID {} or one of its nodes was removed from our network graph recently", &msg.short_channel_id),
 					action: ErrorAction::IgnoreAndLog(Level::Gossip)});
 			}
 		}
 
-		let utxo_value = self.pending_checks.check_channel_announcement(
-			utxo_lookup, msg, full_msg)?;
+		let utxo_value =
+			self.pending_checks.check_channel_announcement(utxo_lookup, msg, full_msg)?;
 
 		#[allow(unused_mut, unused_assignments)]
 		let mut announcement_received_time = 0;
 		#[cfg(feature = "std")]
 		{
-			announcement_received_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+			announcement_received_time = SystemTime::now()
+				.duration_since(UNIX_EPOCH)
+				.expect("Time must be > 1970")
+				.as_secs();
 		}
 
 		let chan_info = ChannelInfo {
@@ -2014,8 +2215,11 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 			node_two: msg.node_id_2,
 			two_to_one: None,
 			capacity_sats: utxo_value.map(|a| a.to_sat()),
-			announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
-				{ full_msg.cloned() } else { None },
+			announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
+				full_msg.cloned()
+			} else {
+				None
+			},
 			announcement_received_time,
 			node_one_counter: u32::max_value(),
 			node_two_counter: u32::max_value(),
 		};
 
 		self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?;
 
-		log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.short_channel_id, if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" });
+		log_gossip!(
+			self.logger,
+			"Added channel_announcement for {}{}",
+			msg.short_channel_id,
+			if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }
+		);
 		Ok(())
 	}
 
@@ -2032,7 +2241,9 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// The channel and any node for which this was their last channel are removed from the graph.
 	pub fn channel_failed_permanent(&self, short_channel_id: u64) {
 		#[cfg(feature = "std")]
-		let current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+		let current_time_unix = Some(
+			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
+		);
 		#[cfg(not(feature = "std"))]
 		let current_time_unix = None;
 
@@ -2042,7 +2253,9 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// Marks a channel in the graph as failed permanently.
 	///
 	/// The channel and any node for which this was their last channel are removed from the graph.
-	fn channel_failed_permanent_with_time(&self, short_channel_id: u64, current_time_unix: Option<u64>) {
+	fn channel_failed_permanent_with_time(
+		&self, short_channel_id: u64, current_time_unix: Option<u64>,
+	) {
 		let mut channels = self.channels.write().unwrap();
 		if let Some(chan) = channels.remove(&short_channel_id) {
 			let mut nodes = self.nodes.write().unwrap();
@@ -2055,7 +2268,9 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// from local storage.
 	pub fn node_failed_permanent(&self, node_id: &PublicKey) {
 		#[cfg(feature = "std")]
-		let current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+		let current_time_unix = Some(
+			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
+		);
 		#[cfg(not(feature = "std"))]
 		let current_time_unix = None;
 
@@ -2069,11 +2284,15 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 			let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
 			for scid in node.channels.iter() {
 				if let Some(chan_info) = channels.remove(scid) {
-					let other_node_id = if node_id == chan_info.node_one { chan_info.node_two } else { chan_info.node_one };
-					if let IndexedMapEntry::Occupied(mut other_node_entry) = nodes.entry(other_node_id) {
-						other_node_entry.get_mut().channels.retain(|chan_id| {
-							*scid != *chan_id
-						});
+					let other_node_id = if node_id == chan_info.node_one {
+						chan_info.node_two
+					} else {
+						chan_info.node_one
+					};
+					if let IndexedMapEntry::Occupied(mut other_node_entry) =
+						nodes.entry(other_node_id)
+					{
+						other_node_entry.get_mut().channels.retain(|chan_id| *scid != *chan_id);
 						if other_node_entry.get().channels.is_empty() {
 							removed_node_counters.push(other_node_entry.get().node_counter);
 							other_node_entry.remove_entry();
@@ -2107,7 +2326,8 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// This method is only available with the `std` feature. See
 	/// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] for non-`std` use.
 	pub fn remove_stale_channels_and_tracking(&self) {
-		let time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+		let time =
+			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
 		self.remove_stale_channels_and_tracking_with_time(time);
 	}
 
@@ -2122,24 +2342,38 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	/// This method will also cause us to stop tracking removed nodes and channels if they have been
 	/// in the map for a while so that these can be resynced from gossip in the future.
 	#[cfg_attr(feature = "std", doc = "")]
-	#[cfg_attr(feature = "std", doc = "This function takes the current unix time as an argument. For users with the `std` feature")]
-	#[cfg_attr(feature = "std", doc = "enabled, [`NetworkGraph::remove_stale_channels_and_tracking`] may be preferable.")]
+	#[cfg_attr(
+		feature = "std",
+		doc = "This function takes the current unix time as an argument. For users with the `std` feature"
+	)]
+	#[cfg_attr(
+		feature = "std",
+		doc = "enabled, [`NetworkGraph::remove_stale_channels_and_tracking`] may be preferable."
+	)]
 	pub fn remove_stale_channels_and_tracking_with_time(&self, current_time_unix: u64) {
 		let mut channels = self.channels.write().unwrap();
 		// Time out if we haven't received an update in at least 14 days.
-		if current_time_unix > u32::max_value() as u64 { return; } // Remove by 2106
-		if current_time_unix < STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS { return; }
+		if current_time_unix > u32::max_value() as u64 {
+			return;
+		} // Remove by 2106
+		if current_time_unix < STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
+			return;
+		}
 		let min_time_unix: u32 = (current_time_unix - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
 		// Sadly BTreeMap::retain was only stabilized in 1.53 so we can't switch to it for some
 		// time.
 		let mut scids_to_remove = Vec::new();
 		for (scid, info) in channels.unordered_iter_mut() {
-			if info.one_to_two.is_some() && info.one_to_two.as_ref().unwrap().last_update < min_time_unix {
+			if info.one_to_two.is_some()
+				&& info.one_to_two.as_ref().unwrap().last_update < min_time_unix
+			{
 				log_gossip!(self.logger, "Removing directional update one_to_two (0) for channel {} due to its timestamp {} being below {}",
 					scid, info.one_to_two.as_ref().unwrap().last_update, min_time_unix);
 				info.one_to_two = None;
 			}
-			if info.two_to_one.is_some() && info.two_to_one.as_ref().unwrap().last_update < min_time_unix {
+			if info.two_to_one.is_some()
+				&& info.two_to_one.as_ref().unwrap().last_update < min_time_unix
+			{
 				log_gossip!(self.logger, "Removing directional update two_to_one (1) for channel {} due to its timestamp {} being below {}",
 					scid, info.two_to_one.as_ref().unwrap().last_update, min_time_unix);
 				info.two_to_one = None;
@@ -2159,7 +2393,9 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		if !scids_to_remove.is_empty() {
 			let mut nodes = self.nodes.write().unwrap();
 			for scid in scids_to_remove {
-				let info = channels.remove(&scid).expect("We just accessed this scid, it should be present");
+				let info = channels
+					.remove(&scid)
+					.expect("We just accessed this scid, it should be present");
 				self.remove_channel_in_nodes(&mut nodes, &info, scid);
 				self.removed_channels.lock().unwrap().insert(scid, Some(current_time_unix));
 			}
@@ -2180,7 +2416,8 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 			}
 			#[allow(unreachable_code)]
 			false
-		}};
+			}
+		};
 
 		self.removed_channels.lock().unwrap().retain(|_, time| should_keep_tracking(time));
 		self.removed_nodes.lock().unwrap().retain(|_, time| should_keep_tracking(time));
@@ -2205,7 +2442,9 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 	///
 	/// If not built with `std`, any updates with a timestamp more than two weeks in the past or
 	/// materially in the future will be rejected.
-	pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> {
+	pub fn update_channel_unsigned(
+		&self, msg: &msgs::UnsignedChannelUpdate,
+	) -> Result<(), LightningError> {
 		self.update_channel_internal(msg, None, None, false)
 	}
 
@@ -2219,10 +2458,10 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true)
 	}
 
-	fn update_channel_internal(&self, msg: &msgs::UnsignedChannelUpdate,
-		full_msg: Option<&msgs::ChannelUpdate>, sig: Option<&secp256k1::ecdsa::Signature>,
-		only_verify: bool) -> Result<(), LightningError>
-	{
+	fn update_channel_internal(
+		&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>,
+		sig: Option<&secp256k1::ecdsa::Signature>, only_verify: bool,
+	) -> Result<(), LightningError> {
 		let chan_enabled = msg.channel_flags & (1 << 1) != (1 << 1);
 
 		if msg.chain_hash != self.chain_hash {
@@ -2236,12 +2475,21 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		{
 			// Note that many tests rely on being able to set arbitrarily old timestamps, thus we
 			// disable this check during tests!
-			let time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+			let time = SystemTime::now()
+				.duration_since(UNIX_EPOCH)
+				.expect("Time must be > 1970")
+				.as_secs();
 			if (msg.timestamp as u64) < time - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
-				return Err(LightningError{err: "channel_update is older than two weeks old".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)});
+				return Err(LightningError {
+					err: "channel_update is older than two weeks old".to_owned(),
+					action: ErrorAction::IgnoreAndLog(Level::Gossip),
+				});
 			}
 			if msg.timestamp as u64 > time + 60 * 60 * 24 {
-				return Err(LightningError{err: "channel_update has a timestamp more than a day in the future".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)});
+				return Err(LightningError {
+					err: "channel_update has a timestamp more than a day in the future".to_owned(),
+					action: ErrorAction::IgnoreAndLog(Level::Gossip),
+				});
 			}
 		}
 
@@ -2254,45 +2502,56 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
 		);
 
 		if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
-			return Err(LightningError{err:
-				"htlc_maximum_msat is larger than maximum possible msats".to_owned(),
-				action: ErrorAction::IgnoreError});
-		}
-
-		let check_update_latest = |target: &Option<ChannelUpdateInfo>| -> Result<(), LightningError> {
-			if let Some(existing_chan_info) = target {
-				// The timestamp field is somewhat of a misnomer - the BOLTs use it to
-				// order updates to ensure you always have the latest one, only
-				// suggesting that it be at least the current time. For
-				// channel_updates specifically, the BOLTs discuss the possibility of
-				// pruning based on the timestamp field being more than two weeks old,
-				// but only in the non-normative section.
-				if existing_chan_info.last_update > msg.timestamp {
-					return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
-				} else if existing_chan_info.last_update == msg.timestamp {
-					return Err(LightningError{err: "Update had same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
+			return Err(LightningError {
+				err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(),
+				action: ErrorAction::IgnoreError,
+			});
+		}
+
+		let check_update_latest =
+			|target: &Option<ChannelUpdateInfo>| -> Result<(), LightningError> {
+				if let Some(existing_chan_info) = target {
+					// The timestamp field is somewhat of a misnomer - the BOLTs use it to
+					// order updates to ensure you always have the latest one, only
+					// suggesting that it be at least the current time. For
+					// channel_updates specifically, the BOLTs discuss the possibility of
+					// pruning based on the timestamp field being more than two weeks old,
+					// but only in the non-normative section.
+					if existing_chan_info.last_update > msg.timestamp {
+						return Err(LightningError {
+							err: "Update older than last processed update".to_owned(),
+							action: ErrorAction::IgnoreDuplicateGossip,
+						});
+					} else if existing_chan_info.last_update == msg.timestamp {
+						return Err(LightningError {
+							err: "Update had same timestamp as last processed update".to_owned(),
+							action: ErrorAction::IgnoreDuplicateGossip,
+						});
+					}
 				}
-			}
-			Ok(())
-		};
+				Ok(())
+			};
 
-		let check_msg_sanity = |channel: &ChannelInfo| -> Result<(), LightningError> {
-			if let Some(capacity_sats) = channel.capacity_sats {
-				// It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None).
-				// Don't query UTXO set here to reduce DoS risks.
- if capacity_sats > MAX_VALUE_MSAT / 1000 || msg.htlc_maximum_msat > capacity_sats * 1000 { - return Err(LightningError{err: + let check_msg_sanity = + |channel: &ChannelInfo| -> Result<(), LightningError> { + if let Some(capacity_sats) = channel.capacity_sats { + // It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None). + // Don't query UTXO set here to reduce DoS risks. + if capacity_sats > MAX_VALUE_MSAT / 1000 + || msg.htlc_maximum_msat > capacity_sats * 1000 + { + return Err(LightningError{err: "htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(), action: ErrorAction::IgnoreError}); + } } - } - if msg.channel_flags & 1 == 1 { - check_update_latest(&channel.two_to_one) - } else { - check_update_latest(&channel.one_to_two) - } - }; + if msg.channel_flags & 1 == 1 { + check_update_latest(&channel.two_to_one) + } else { + check_update_latest(&channel.one_to_two) + } + }; let node_pubkey; { @@ -2313,11 +2572,10 @@ impl NetworkGraph where L::Target: Logger { } else { channel.node_one.as_slice() }; - node_pubkey = PublicKey::from_slice(node_id) - .map_err(|_| LightningError{ - err: "Couldn't parse source node pubkey".to_owned(), - action: ErrorAction::IgnoreAndLog(Level::Debug) - })?; + node_pubkey = PublicKey::from_slice(node_id).map_err(|_| LightningError { + err: "Couldn't parse source node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + })?; }, } } @@ -2327,14 +2585,19 @@ impl NetworkGraph where L::Target: Logger { secp_verify_sig!(self.secp_ctx, &msg_hash, &sig, &node_pubkey, "channel_update"); } - if only_verify { return Ok(()); } + if only_verify { + return Ok(()); + } let mut channels = self.channels.write().unwrap(); if let Some(channel) = channels.get_mut(&msg.short_channel_id) { check_msg_sanity(channel)?; - let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY - { full_msg.cloned() } else { None }; + let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { + full_msg.cloned() + } else { + None + }; let new_channel_info = Some(ChannelUpdateInfo { enabled: chan_enabled, @@ -2346,7 +2609,7 @@ impl NetworkGraph where L::Target: Logger { base_msat: msg.fee_base_msat, proportional_millionths: msg.fee_proportional_millionths, }, - last_update_message + last_update_message, }); if msg.channel_flags & 1 == 1 { @@ -2359,21 +2622,23 @@ impl NetworkGraph where L::Target: Logger { Ok(()) } - fn remove_channel_in_nodes(&self, nodes: &mut IndexedMap, chan: &ChannelInfo, short_channel_id: u64) { + fn remove_channel_in_nodes( + &self, nodes: &mut IndexedMap, chan: &ChannelInfo, short_channel_id: u64, + ) { macro_rules! remove_from_node { ($node_id: expr) => { if let IndexedMapEntry::Occupied(mut entry) = nodes.entry($node_id) { - entry.get_mut().channels.retain(|chan_id| { - short_channel_id != *chan_id - }); + entry.get_mut().channels.retain(|chan_id| short_channel_id != *chan_id); if entry.get().channels.is_empty() { self.removed_node_counters.lock().unwrap().push(entry.get().node_counter); entry.remove_entry(); } } else { - panic!("Had channel that pointed to unknown node (ie inconsistent network map)!"); + panic!( + "Had channel that pointed to unknown node (ie inconsistent network map)!" + ); } - } + }; } remove_from_node!(chan.node_one); @@ -2422,7 +2687,8 @@ impl ReadOnlyNetworkGraph<'_> { /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. 
pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { - self.nodes.get(&NodeId::from_pubkey(&pubkey)) + self.nodes + .get(&NodeId::from_pubkey(&pubkey)) .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec())) } @@ -2435,48 +2701,61 @@ impl ReadOnlyNetworkGraph<'_> { #[cfg(test)] pub(crate) mod tests { use crate::events::{MessageSendEvent, MessageSendEventsProvider}; - use crate::ln::channelmanager; use crate::ln::chan_utils::make_funding_redeemscript; + use crate::ln::channelmanager; #[cfg(feature = "std")] use crate::ln::features::InitFeatures; use crate::ln::msgs::SocketAddress; - use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo}; + use crate::ln::msgs::{ + ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, QueryChannelRange, + QueryShortChannelIds, ReplyChannelRange, RoutingMessageHandler, + UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement, + MAX_VALUE_MSAT, + }; + use crate::routing::gossip::{ + ChannelInfo, ChannelUpdateInfo, NetworkGraph, NetworkUpdate, NodeAlias, + NodeAnnouncementInfo, NodeId, NodeInfo, P2PGossipSync, RoutingFees, + MAX_EXCESS_BYTES_FOR_RELAY, + }; use crate::routing::utxo::{UtxoLookupError, UtxoResult}; - use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, - UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, - ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use crate::util::config::UserConfig; - use crate::util::test_utils; - use crate::util::ser::{Hostname, ReadableArgs, Readable, Writeable}; use crate::util::scid_utils::scid_from_parts; + use crate::util::ser::{Hostname, Readable, ReadableArgs, Writeable}; + use crate::util::test_utils; - use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS; use super::STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS; + use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS; + use bitcoin::amount::Amount; + use bitcoin::constants::ChainHash; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::hex::FromHex; use bitcoin::network::Network; - use bitcoin::amount::Amount; - use bitcoin::constants::ChainHash; use bitcoin::script::ScriptBuf; - use bitcoin::transaction::TxOut; - use bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; + use bitcoin::secp256k1::{PublicKey, SecretKey}; + use bitcoin::transaction::TxOut; use crate::io; - use bitcoin::secp256k1; use crate::prelude::*; use crate::sync::Arc; + use bitcoin::secp256k1; fn create_network_graph() -> NetworkGraph> { let logger = Arc::new(test_utils::TestLogger::new()); NetworkGraph::new(Network::Testnet, logger) } - fn create_gossip_sync(network_graph: &NetworkGraph>) -> ( - Secp256k1, P2PGossipSync<&NetworkGraph>, - Arc, Arc> + fn create_gossip_sync( + network_graph: &NetworkGraph>, + ) -> ( + Secp256k1, + P2PGossipSync< + &NetworkGraph>, + Arc, + Arc, + >, ) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); @@ -2497,7 +2776,9 @@ pub(crate) mod tests { assert!(!gossip_sync.should_request_full_sync()); } - pub(crate) fn get_signed_node_announcement(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> NodeAnnouncement { + pub(crate) fn get_signed_node_announcement( + f: F, node_key: &SecretKey, secp_ctx: &Secp256k1, + ) -> NodeAnnouncement { let 
node_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_key)); let mut unsigned_announcement = UnsignedNodeAnnouncement { features: channelmanager::provided_node_features(&UserConfig::default()), @@ -2513,11 +2794,13 @@ pub(crate) mod tests { let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); NodeAnnouncement { signature: secp_ctx.sign_ecdsa(&msghash, node_key), - contents: unsigned_announcement + contents: unsigned_announcement, } } - pub(crate) fn get_signed_channel_announcement(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelAnnouncement { + pub(crate) fn get_signed_channel_announcement( + f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1, + ) -> ChannelAnnouncement { let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key); let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); @@ -2529,8 +2812,14 @@ pub(crate) mod tests { short_channel_id: 0, node_id_1: NodeId::from_pubkey(&node_id_1), node_id_2: NodeId::from_pubkey(&node_id_2), - bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey)), - bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey)), + bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key( + &secp_ctx, + node_1_btckey, + )), + bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key( + &secp_ctx, + node_2_btckey, + )), excess_data: Vec::new(), }; f(&mut unsigned_announcement); @@ -2547,11 +2836,16 @@ pub(crate) mod tests { pub(crate) fn get_channel_script(secp_ctx: &Secp256k1) -> ScriptBuf { let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); - make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), - &PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_p2wsh() + make_funding_redeemscript( + &PublicKey::from_secret_key(secp_ctx, &node_1_btckey), + &PublicKey::from_secret_key(secp_ctx, &node_2_btckey), + ) + .to_p2wsh() } - pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { + pub(crate) fn get_signed_channel_update( + f: F, node_key: &SecretKey, secp_ctx: &Secp256k1, + ) -> ChannelUpdate { let mut unsigned_channel_update = UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 0, @@ -2563,13 +2857,14 @@ pub(crate) mod tests { htlc_maximum_msat: 1_000_000, fee_base_msat: 10_000, fee_proportional_millionths: 20, - excess_data: Vec::new() + excess_data: Vec::new(), }; f(&mut unsigned_channel_update); - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); + let msghash = + hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); ChannelUpdate { signature: secp_ctx.sign_ecdsa(&msghash, node_key), - contents: unsigned_channel_update + contents: unsigned_channel_update, } } @@ -2586,15 +2881,17 @@ pub(crate) mod tests { let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!("No existing channels for node_announcement", e.err) + Err(e) => assert_eq!("No existing channels for node_announcement", e.err), }; { // Announce a channel to add a corresponding node. 
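// The signing helpers above all commit to the double-SHA256 of the unsigned contents'
// serialization, per BOLT 7. A condensed, generic sketch of that shared pattern
// (illustrative only, using the same macros and types as the helpers themselves):
fn sign_gossip_contents<M: Writeable>(
	contents: &M, key: &SecretKey, secp_ctx: &Secp256k1<All>,
) -> secp256k1::ecdsa::Signature {
	// BOLT 7 signatures cover SHA256d(serialized unsigned message contents).
	let msghash = hash_to_message!(&Sha256dHash::hash(&contents.encode()[..])[..]);
	secp_ctx.sign_ecdsa(&msghash, key)
}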
- let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) + { Ok(res) => assert!(res), - _ => panic!() + _ => panic!(), }; } @@ -2603,35 +2900,44 @@ pub(crate) mod tests { Some(node_1_pubkey), &NodeAnnouncement { signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey), - contents: valid_announcement.contents.clone() - }) { + contents: valid_announcement.contents.clone(), + }, + ) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message") + Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message"), }; match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(res), - Err(_) => panic!() + Err(_) => panic!(), }; - let announcement_with_data = get_signed_node_announcement(|unsigned_announcement| { - unsigned_announcement.timestamp += 1000; - unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); - }, node_1_privkey, &secp_ctx); + let announcement_with_data = get_signed_node_announcement( + |unsigned_announcement| { + unsigned_announcement.timestamp += 1000; + unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); + }, + node_1_privkey, + &secp_ctx, + ); // Return false because contains excess data. match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement_with_data) { Ok(res) => assert!(!res), - Err(_) => panic!() + Err(_) => panic!(), }; // Even though previous announcement was not relayed further, we still accepted it, // so we now won't accept announcements before the previous one. 
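// The handlers exercised in these tests share one result convention: `Err(_)` means the
// message was rejected outright, `Ok(false)` means it was accepted into the graph but is
// not worth relaying (e.g. oversized `excess_data`), and `Ok(true)` means accepted and
// relayable. A hypothetical helper making that explicit:
use crate::ln::msgs::LightningError;
fn describe_handling(res: &Result<bool, LightningError>) -> &'static str {
	match res {
		Ok(true) => "accepted and relayed to peers",
		Ok(false) => "accepted, but not relayed",
		Err(_) => "rejected",
	}
}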
- let outdated_announcement = get_signed_node_announcement(|unsigned_announcement| { - unsigned_announcement.timestamp += 1000 - 10; - }, node_1_privkey, &secp_ctx); + let outdated_announcement = get_signed_node_announcement( + |unsigned_announcement| { + unsigned_announcement.timestamp += 1000 - 10; + }, + node_1_privkey, + &secp_ctx, + ); match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &outdated_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Update older than last processed update") + Err(e) => assert_eq!(e.err, "Update older than last processed update"), }; } @@ -2645,20 +2951,25 @@ pub(crate) mod tests { let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let good_script = get_channel_script(&secp_ctx); - let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); // Test if the UTXO lookups were not supported let network_graph = NetworkGraph::new(Network::Testnet, &logger); let mut gossip_sync = P2PGossipSync::new(&network_graph, None, &logger); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(res), - _ => panic!() + _ => panic!(), }; { - match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) { + match network_graph + .read_only() + .channels() + .get(&valid_announcement.contents.short_channel_id) + { None => panic!(), - Some(_) => () + Some(_) => (), }; } @@ -2666,7 +2977,7 @@ pub(crate) mod tests { // drop new one on the floor, since we can't see any changes. match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel") + Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel"), }; // Test if an associated transaction were not on-chain (or not confirmed). @@ -2675,29 +2986,43 @@ pub(crate) mod tests { let network_graph = NetworkGraph::new(Network::Testnet, &logger); gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger); - let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.short_channel_id += 1; - }, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = get_signed_channel_announcement( + |unsigned_announcement| { + unsigned_announcement.short_channel_id += 1; + }, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry") + Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry"), }; // Now test if the transaction is found in the UTXO set and the script is correct. 
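// These tests drive UTXO validation through `TestChainSource`; a real backend would
// implement the same lookup. A minimal sketch, assuming the single-method `UtxoLookup`
// trait shape used by this file (the struct name is illustrative): always answering
// `UnknownTx` makes every announcement fail exactly like the missing-UTXO case above.
use crate::routing::utxo::UtxoLookup;
struct RejectAllUtxos;
impl UtxoLookup for RejectAllUtxos {
	fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult {
		UtxoResult::Sync(Err(UtxoLookupError::UnknownTx))
	}
}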
*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script.clone() })); - let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.short_channel_id += 2; - }, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = get_signed_channel_announcement( + |unsigned_announcement| { + unsigned_announcement.short_channel_id += 2; + }, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(res), - _ => panic!() + _ => panic!(), }; { - match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) { + match network_graph + .read_only() + .channels() + .get(&valid_announcement.contents.short_channel_id) + { None => panic!(), - Some(_) => () + Some(_) => (), }; } @@ -2707,67 +3032,101 @@ pub(crate) mod tests { UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Already have chain-validated channel") + Err(e) => assert_eq!(e.err, "Already have chain-validated channel"), }; #[cfg(feature = "std")] { use std::time::{SystemTime, UNIX_EPOCH}; - let tracking_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + let tracking_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs(); // Mark a node as permanently failed so it's tracked as removed. - gossip_sync.network_graph().node_failed_permanent(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey)); + gossip_sync + .network_graph() + .node_failed_permanent(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey)); // Return error and ignore valid channel announcement if one of the nodes has been tracked as removed. - let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.short_channel_id += 3; - }, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = get_signed_channel_announcement( + |unsigned_announcement| { + unsigned_announcement.short_channel_id += 3; + }, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel with SCID 3 or one of its nodes was removed from our network graph recently") } - gossip_sync.network_graph().remove_stale_channels_and_tracking_with_time(tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS); + gossip_sync.network_graph().remove_stale_channels_and_tracking_with_time( + tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS, + ); // The above channel announcement should be handled as per normal now. 
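// The gate being tested here in one line: an SCID or node removed at `removed_at` stays
// quarantined until the tracking horizon passes. A sketch, with `tracking_age_limit`
// standing in for `REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS`:
fn reannouncement_allowed(removed_at: u64, now: u64, tracking_age_limit: u64) -> bool {
	now >= removed_at + tracking_age_limit
}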
- match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { + match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) + { Ok(res) => assert!(res), - _ => panic!() + _ => panic!(), } } - let valid_excess_data_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.short_channel_id += 4; - unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); - }, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_excess_data_announcement = get_signed_channel_announcement( + |unsigned_announcement| { + unsigned_announcement.short_channel_id += 4; + unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); + }, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); let mut invalid_sig_announcement = valid_excess_data_announcement.clone(); invalid_sig_announcement.contents.excess_data = Vec::new(); - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &invalid_sig_announcement) { + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &invalid_sig_announcement) + { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message") + Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message"), }; // Don't relay valid channels with excess data - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_excess_data_announcement) { + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &valid_excess_data_announcement) + { Ok(res) => assert!(!res), - _ => panic!() + _ => panic!(), }; - let channel_to_itself_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx); - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &channel_to_itself_announcement) { + let channel_to_itself_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx); + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &channel_to_itself_announcement) + { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself") + Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself"), }; // Test that channel announcements with the wrong chain hash are ignored (network graph is testnet, // announcement is mainnet). 
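// Gossip is segregated per chain: every announcement and update carries the genesis
// hash of the chain it belongs to, and anything for a different chain is dropped, as
// the next case shows. An illustrative check (not the LDK API itself):
fn message_is_for_our_chain(msg_chain: &ChainHash, ours: &ChainHash) -> bool {
	msg_chain == ours
}
fn _chain_gate_example() {
	let ours = ChainHash::using_genesis_block(Network::Testnet);
	assert!(!message_is_for_our_chain(&ChainHash::using_genesis_block(Network::Bitcoin), &ours));
}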
- let incorrect_chain_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); - }, node_1_privkey, node_2_privkey, &secp_ctx); - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &incorrect_chain_announcement) { + let incorrect_chain_announcement = get_signed_channel_announcement( + |unsigned_announcement| { + unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); + }, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &incorrect_chain_announcement) + { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash") + Err(e) => { + assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash") + }, }; } @@ -2789,16 +3148,20 @@ pub(crate) mod tests { { // Announce a channel we will update let good_script = get_channel_script(&secp_ctx); - *chain_source.utxo_ret.lock().unwrap() = - UtxoResult::Sync(Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() })); + *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut { + value: amount_sats, + script_pubkey: good_script.clone(), + })); - let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) { + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) + { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; - } let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); @@ -2814,77 +3177,109 @@ pub(crate) mod tests { Some(channel_info) => { assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144); assert!(channel_info.two_to_one.is_none()); - } + }, }; } - let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.timestamp += 100; - unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); - }, node_1_privkey, &secp_ctx); + let valid_channel_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.timestamp += 100; + unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); + }, + node_1_privkey, + &secp_ctx, + ); // Return false because contains excess data match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) { Ok(res) => assert!(!res), - _ => panic!() + _ => panic!(), }; - let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.timestamp += 110; - unsigned_channel_update.short_channel_id += 1; - }, node_1_privkey, &secp_ctx); + let valid_channel_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.timestamp += 110; + unsigned_channel_update.short_channel_id += 1; + }, + node_1_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Couldn't find channel for update") + Err(e) => assert_eq!(e.err, "Couldn't find channel for update"), }; - let 
valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
-		unsigned_channel_update.htlc_maximum_msat = MAX_VALUE_MSAT + 1;
-		unsigned_channel_update.timestamp += 110;
-	}, node_1_privkey, &secp_ctx);
+	let valid_channel_update = get_signed_channel_update(
+		|unsigned_channel_update| {
+			unsigned_channel_update.htlc_maximum_msat = MAX_VALUE_MSAT + 1;
+			unsigned_channel_update.timestamp += 110;
+		},
+		node_1_privkey,
+		&secp_ctx,
+	);
 	match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
 		Ok(_) => panic!(),
-		Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats")
+		Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats"),
 	};

-	let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
-		unsigned_channel_update.htlc_maximum_msat = amount_sats.to_sat() * 1000 + 1;
-		unsigned_channel_update.timestamp += 110;
-	}, node_1_privkey, &secp_ctx);
+	let valid_channel_update = get_signed_channel_update(
+		|unsigned_channel_update| {
+			unsigned_channel_update.htlc_maximum_msat = amount_sats.to_sat() * 1000 + 1;
+			unsigned_channel_update.timestamp += 110;
+		},
+		node_1_privkey,
+		&secp_ctx,
+	);
 	match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
 		Ok(_) => panic!(),
-		Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than channel capacity or capacity is bogus")
+		Err(e) => assert_eq!(
+			e.err,
+			"htlc_maximum_msat is larger than channel capacity or capacity is bogus"
+		),
 	};

 	// Even though previous update was not relayed further, we still accepted it,
 	// so we now won't accept updates before the previous one.
-	let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
-		unsigned_channel_update.timestamp += 100;
-	}, node_1_privkey, &secp_ctx);
+	let valid_channel_update = get_signed_channel_update(
+		|unsigned_channel_update| {
+			unsigned_channel_update.timestamp += 100;
+		},
+		node_1_privkey,
+		&secp_ctx,
+	);
 	match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
 		Ok(_) => panic!(),
-		Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update")
+		Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update"),
 	};

-	let mut invalid_sig_channel_update = get_signed_channel_update(|unsigned_channel_update| {
-		unsigned_channel_update.timestamp += 500;
-	}, node_1_privkey, &secp_ctx);
+	let mut invalid_sig_channel_update = get_signed_channel_update(
+		|unsigned_channel_update| {
+			unsigned_channel_update.timestamp += 500;
+		},
+		node_1_privkey,
+		&secp_ctx,
+	);
 	let zero_hash = Sha256dHash::hash(&[0; 32]);
 	let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
 	invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey);
 	match gossip_sync.handle_channel_update(Some(node_1_pubkey), &invalid_sig_channel_update) {
 		Ok(_) => panic!(),
-		Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message")
+		Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message"),
 	};

 	// Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel
 	// update is mainnet).
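// The two ordering rejections above reflect a single rule: a channel_update only
// supersedes the stored one if its timestamp is strictly greater, and an equal
// timestamp is treated as a duplicate. Restated as an illustrative helper:
fn update_supersedes(existing: Option<u32>, new_timestamp: u32) -> Result<(), &'static str> {
	match existing {
		Some(ts) if ts > new_timestamp => Err("Update older than last processed update"),
		Some(ts) if ts == new_timestamp => Err("Update had same timestamp as last processed update"),
		_ => Ok(()),
	}
}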
- let incorrect_chain_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin); - }, node_1_privkey, &secp_ctx); + let incorrect_chain_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.chain_hash = + ChainHash::using_genesis_block(Network::Bitcoin); + }, + node_1_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_update(Some(node_1_pubkey), &incorrect_chain_update) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash") + Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash"), }; } @@ -2906,17 +3301,32 @@ pub(crate) mod tests { let short_channel_id; { // Check that we can manually apply a channel update. - let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; - assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source).is_ok()); + assert!(network_graph + .update_channel_from_announcement(&valid_channel_announcement, &chain_source) + .is_ok()); assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + assert!(network_graph + .read_only() + .channels() + .get(&short_channel_id) + .unwrap() + .one_to_two + .is_none()); network_graph.update_channel(&valid_channel_update).unwrap(); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); + assert!(network_graph + .read_only() + .channels() + .get(&short_channel_id) + .unwrap() + .one_to_two + .is_some()); } // Non-permanent failure doesn't touch the channel at all @@ -2925,7 +3335,7 @@ pub(crate) mod tests { None => panic!(), Some(channel_info) => { assert!(channel_info.one_to_two.as_ref().unwrap().enabled); - } + }, }; network_graph.handle_network_update(&NetworkUpdate::ChannelFailure { @@ -2937,7 +3347,7 @@ pub(crate) mod tests { None => panic!(), Some(channel_info) => { assert!(channel_info.one_to_two.as_ref().unwrap().enabled); - } + }, }; } @@ -2956,10 +3366,13 @@ pub(crate) mod tests { let network_graph = NetworkGraph::new(Network::Testnet, &logger); // Announce a channel to test permanent node failure - let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); let short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; - assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source).is_ok()); + assert!(network_graph + .update_channel_from_announcement(&valid_channel_announcement, &chain_source) + .is_ok()); assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); // Non-permanent node failure does not delete any nodes or channels @@ -2969,7 +3382,11 @@ pub(crate) mod tests { }); 
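// Usage sketch for the event-driven API exercised here: only `is_permanent: true`
// mutates the graph (dropping the channel, or the node plus all of its channels, and
// starting the removal tracking seen earlier), while `is_permanent: false` leaves the
// graph untouched, as the surrounding assertions verify. The function is hypothetical:
fn fail_channel_permanently(graph: &NetworkGraph<&test_utils::TestLogger>, scid: u64) {
	graph.handle_network_update(&NetworkUpdate::ChannelFailure {
		short_channel_id: scid,
		is_permanent: true,
	});
}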
assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); - assert!(network_graph.read_only().nodes().get(&NodeId::from_pubkey(&node_2_id)).is_some()); + assert!(network_graph + .read_only() + .nodes() + .get(&NodeId::from_pubkey(&node_2_id)) + .is_some()); // Permanent node failure deletes node and its channels network_graph.handle_network_update(&NetworkUpdate::NodeFailure { @@ -2996,32 +3413,61 @@ pub(crate) mod tests { let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); let short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; - assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source).is_ok()); + assert!(network_graph + .update_channel_from_announcement(&valid_channel_announcement, &chain_source) + .is_ok()); assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); // Submit two channel updates for each channel direction (update.flags bit). let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); - assert!(gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update).is_ok()); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); - - let valid_channel_update_2 = get_signed_channel_update(|update| {update.channel_flags |=1;}, node_2_privkey, &secp_ctx); + assert!(gossip_sync + .handle_channel_update(Some(node_1_pubkey), &valid_channel_update) + .is_ok()); + assert!(network_graph + .read_only() + .channels() + .get(&short_channel_id) + .unwrap() + .one_to_two + .is_some()); + + let valid_channel_update_2 = get_signed_channel_update( + |update| { + update.channel_flags |= 1; + }, + node_2_privkey, + &secp_ctx, + ); gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update_2).unwrap(); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().two_to_one.is_some()); - - network_graph.remove_stale_channels_and_tracking_with_time(100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS); + assert!(network_graph + .read_only() + .channels() + .get(&short_channel_id) + .unwrap() + .two_to_one + .is_some()); + + network_graph.remove_stale_channels_and_tracking_with_time( + 100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS, + ); assert_eq!(network_graph.read_only().channels().len(), 1); assert_eq!(network_graph.read_only().nodes().len(), 2); - network_graph.remove_stale_channels_and_tracking_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS); - #[cfg(not(feature = "std"))] { + network_graph.remove_stale_channels_and_tracking_with_time( + 101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS, + ); + #[cfg(not(feature = "std"))] + { // Make sure removed channels are tracked. 
assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1); } - network_graph.remove_stale_channels_and_tracking_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + - REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS); + network_graph.remove_stale_channels_and_tracking_with_time( + 101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS, + ); #[cfg(feature = "std")] { @@ -3034,20 +3480,47 @@ pub(crate) mod tests { // Note that the directional channel information will have been removed already.. // We want to check that this will work even if *one* of the channel updates is recent, // so we should add it with a recent timestamp. - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + assert!(network_graph + .read_only() + .channels() + .get(&short_channel_id) + .unwrap() + .one_to_two + .is_none()); use std::time::{SystemTime, UNIX_EPOCH}; - let announcement_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); - let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.timestamp = (announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32; - }, node_1_privkey, &secp_ctx); - assert!(gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update).is_ok()); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); - network_graph.remove_stale_channels_and_tracking_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS); + let announcement_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs(); + let valid_channel_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.timestamp = + (announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32; + }, + node_1_privkey, + &secp_ctx, + ); + assert!(gossip_sync + .handle_channel_update(Some(node_1_pubkey), &valid_channel_update) + .is_ok()); + assert!(network_graph + .read_only() + .channels() + .get(&short_channel_id) + .unwrap() + .one_to_two + .is_some()); + network_graph.remove_stale_channels_and_tracking_with_time( + announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS, + ); // Make sure removed channels are tracked. assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1); // Provide a later time so that sufficient time has passed - network_graph.remove_stale_channels_and_tracking_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + - REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS); + network_graph.remove_stale_channels_and_tracking_with_time( + announcement_time + + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS, + ); } assert_eq!(network_graph.read_only().channels().len(), 0); @@ -3058,7 +3531,10 @@ pub(crate) mod tests { { use std::time::{SystemTime, UNIX_EPOCH}; - let tracking_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + let tracking_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs(); // Clear tracked nodes and channels for clean slate network_graph.removed_channels.lock().unwrap().clear(); @@ -3066,8 +3542,9 @@ pub(crate) mod tests { // Add a channel and nodes from channel announcement. So our network graph will // now only consist of two nodes and one channel between them. 
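// Outside of tests, callers would not pin the clock as the `_with_time` calls above do.
// Assuming the `std`-gated sibling `remove_stale_channels_and_tracking()` (which
// samples the system time itself), a periodic maintenance task might simply run:
fn prune_network_graph(graph: &NetworkGraph<&test_utils::TestLogger>) {
	// Drops stale channels and expires removed-entry tracking in one pass.
	graph.remove_stale_channels_and_tracking();
}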
- assert!(network_graph.update_channel_from_announcement( - &valid_channel_announcement, &chain_source).is_ok()); + assert!(network_graph + .update_channel_from_announcement(&valid_channel_announcement, &chain_source) + .is_ok()); // Mark the channel as permanently failed. This will also remove the two nodes // and all of the entries will be tracked as removed. @@ -3075,14 +3552,29 @@ pub(crate) mod tests { // Should not remove from tracking if insufficient time has passed network_graph.remove_stale_channels_and_tracking_with_time( - tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS - 1); - assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1, "Removed channel count ≠ 1 with tracking_time {}", tracking_time); + tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS - 1, + ); + assert_eq!( + network_graph.removed_channels.lock().unwrap().len(), + 1, + "Removed channel count ≠ 1 with tracking_time {}", + tracking_time + ); // Provide a later time so that sufficient time has passed network_graph.remove_stale_channels_and_tracking_with_time( - tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS); - assert!(network_graph.removed_channels.lock().unwrap().is_empty(), "Unexpectedly removed channels with tracking_time {}", tracking_time); - assert!(network_graph.removed_nodes.lock().unwrap().is_empty(), "Unexpectedly removed nodes with tracking_time {}", tracking_time); + tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS, + ); + assert!( + network_graph.removed_channels.lock().unwrap().is_empty(), + "Unexpectedly removed channels with tracking_time {}", + tracking_time + ); + assert!( + network_graph.removed_nodes.lock().unwrap().is_empty(), + "Unexpectedly removed nodes with tracking_time {}", + tracking_time + ); } #[cfg(not(feature = "std"))] @@ -3099,8 +3591,9 @@ pub(crate) mod tests { // Add a channel and nodes from channel announcement. So our network graph will // now only consist of two nodes and one channel between them. - assert!(network_graph.update_channel_from_announcement( - &valid_channel_announcement, &chain_source).is_ok()); + assert!(network_graph + .update_channel_from_announcement(&valid_channel_announcement, &chain_source) + .is_ok()); // Mark the channel as permanently failed. This will also remove the two nodes // and all of the entries will be tracked as removed. @@ -3112,7 +3605,8 @@ pub(crate) mod tests { // Provide a later time so that sufficient time has passed network_graph.remove_stale_channels_and_tracking_with_time( - removal_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS); + removal_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS, + ); assert!(network_graph.removed_channels.lock().unwrap().is_empty()); assert!(network_graph.removed_nodes.lock().unwrap().is_empty()); } @@ -3133,16 +3627,20 @@ pub(crate) mod tests { let short_channel_id; { // Announce a channel we will update - let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) { + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) + { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; } // Contains initial channel announcement now. 
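// How a caller might drain this cursor-style accessor, using the same concrete types as
// `create_gossip_sync` above. A sketch, with semantics per the assertions that follow:
// each call returns the first relayable announcement at or after the given SCID,
// together with whichever of its directional updates may also be relayed.
fn count_relayable_channels(
	handler: &P2PGossipSync<
		&NetworkGraph<Arc<test_utils::TestLogger>>,
		Arc<test_utils::TestChainSource>,
		Arc<test_utils::TestLogger>,
	>,
) -> usize {
	let mut count = 0;
	let mut next_scid = 0;
	while let Some((ann, _update_1, _update_2)) = handler.get_next_channel_announcement(next_scid) {
		count += 1;
		next_scid = ann.contents.short_channel_id.saturating_add(1);
	}
	count
}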
- let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id); + let channels_with_announcements = + gossip_sync.get_next_channel_announcement(short_channel_id); if let Some(channel_announcements) = channels_with_announcements { let (_, ref update_1, ref update_2) = channel_announcements; assert_eq!(update_1, &None); @@ -3153,17 +3651,22 @@ pub(crate) mod tests { { // Valid channel update - let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.timestamp = 101; - }, node_1_privkey, &secp_ctx); + let valid_channel_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.timestamp = 101; + }, + node_1_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; } // Now contains an initial announcement and an update. - let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id); + let channels_with_announcements = + gossip_sync.get_next_channel_announcement(short_channel_id); if let Some(channel_announcements) = channels_with_announcements { let (_, ref update_1, ref update_2) = channel_announcements; assert_ne!(update_1, &None); @@ -3174,18 +3677,24 @@ pub(crate) mod tests { { // Channel update with excess data. - let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.timestamp = 102; - unsigned_channel_update.excess_data = [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(); - }, node_1_privkey, &secp_ctx); + let valid_channel_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.timestamp = 102; + unsigned_channel_update.excess_data = + [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(); + }, + node_1_privkey, + &secp_ctx, + ); match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; } // Test that announcements with excess data won't be returned - let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id); + let channels_with_announcements = + gossip_sync.get_next_channel_announcement(short_channel_id); if let Some(channel_announcements) = channels_with_announcements { let (_, ref update_1, ref update_2) = channel_announcements; assert_eq!(update_1, &None); @@ -3195,7 +3704,8 @@ pub(crate) mod tests { } // Further starting point have no channels after it - let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id + 1000); + let channels_with_announcements = + gossip_sync.get_next_channel_announcement(short_channel_id + 1000); assert!(channels_with_announcements.is_none()); } @@ -3214,10 +3724,13 @@ pub(crate) mod tests { { // Announce a channel to add 2 nodes - let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) { + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + match gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) + { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; } @@ -3226,16 +3739,18 @@ pub(crate) mod tests { assert!(next_announcements.is_none()); { - let valid_announcement = 
get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); + let valid_announcement = + get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; - let valid_announcement = get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx); + let valid_announcement = + get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx); match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; } @@ -3248,13 +3763,18 @@ pub(crate) mod tests { { // Later announcement which should not be relayed (excess data) prevent us from sharing a node - let valid_announcement = get_signed_node_announcement(|unsigned_announcement| { - unsigned_announcement.timestamp += 10; - unsigned_announcement.excess_data = [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(); - }, node_2_privkey, &secp_ctx); + let valid_announcement = get_signed_node_announcement( + |unsigned_announcement| { + unsigned_announcement.timestamp += 10; + unsigned_announcement.excess_data = + [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(); + }, + node_2_privkey, + &secp_ctx, + ); match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(!res), - Err(_) => panic!() + Err(_) => panic!(), }; } @@ -3272,16 +3792,17 @@ pub(crate) mod tests { let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); // Announce a channel to add a corresponding node. - let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(res), - _ => panic!() + _ => panic!(), }; let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(_) => (), - Err(_) => panic!() + Err(_) => panic!(), }; let mut w = test_utils::TestVecWriter(Vec::new()); @@ -3290,7 +3811,9 @@ pub(crate) mod tests { network_graph.write(&mut w).unwrap(); let logger = Arc::new(test_utils::TestLogger::new()); - assert!(>::read(&mut io::Cursor::new(&w.0), logger).unwrap() == network_graph); + assert!( + >::read(&mut io::Cursor::new(&w.0), logger).unwrap() == network_graph + ); } #[test] @@ -3302,7 +3825,8 @@ pub(crate) mod tests { network_graph.write(&mut w).unwrap(); let logger = Arc::new(test_utils::TestLogger::new()); - let reassembled_network_graph: NetworkGraph<_> = ReadableArgs::read(&mut io::Cursor::new(&w.0), logger).unwrap(); + let reassembled_network_graph: NetworkGraph<_> = + ReadableArgs::read(&mut io::Cursor::new(&w.0), logger).unwrap(); assert!(reassembled_network_graph == network_graph); assert_eq!(reassembled_network_graph.get_last_rapid_gossip_sync_timestamp().unwrap(), 42); } @@ -3310,8 +3834,8 @@ pub(crate) mod tests { #[test] #[cfg(feature = "std")] fn calling_sync_routing_table() { - use std::time::{SystemTime, UNIX_EPOCH}; use crate::ln::msgs::Init; + use std::time::{SystemTime, UNIX_EPOCH}; let network_graph = create_network_graph(); let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph); @@ -3322,7 +3846,11 @@ pub(crate) mod tests { // It should ignore if gossip_queries feature is not enabled { - let init_msg 
= Init { features: InitFeatures::empty(), networks: None, remote_network_address: None }; + let init_msg = Init { + features: InitFeatures::empty(), + networks: None, + remote_network_address: None, + }; gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); @@ -3337,15 +3865,23 @@ pub(crate) mod tests { let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { - MessageSendEvent::SendGossipTimestampFilter{ node_id, msg } => { + MessageSendEvent::SendGossipTimestampFilter { node_id, msg } => { assert_eq!(node_id, &node_id_1); assert_eq!(msg.chain_hash, chain_hash); - let expected_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); - assert!((msg.first_timestamp as u64) >= expected_timestamp - 60*60*24*7*2); - assert!((msg.first_timestamp as u64) < expected_timestamp - 60*60*24*7*2 + 10); + let expected_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs(); + assert!( + (msg.first_timestamp as u64) >= expected_timestamp - 60 * 60 * 24 * 7 * 2 + ); + assert!( + (msg.first_timestamp as u64) + < expected_timestamp - 60 * 60 * 24 * 7 * 2 + 10 + ); assert_eq!(msg.timestamp_range, u32::max_value()); }, - _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery") + _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery"), }; } } @@ -3375,12 +3911,18 @@ pub(crate) mod tests { scids.push(scid_from_parts(108001, 1, 0).unwrap()); for scid in scids { - let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { - unsigned_announcement.short_channel_id = scid; - }, node_1_privkey, node_2_privkey, &secp_ctx); - match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { + let valid_announcement = get_signed_channel_announcement( + |unsigned_announcement| { + unsigned_announcement.short_channel_id = scid; + }, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); + match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) + { Ok(_) => (), - _ => panic!() + _ => panic!(), }; } @@ -3399,8 +3941,8 @@ pub(crate) mod tests { first_blocknum: 0, number_of_blocks: 0, sync_complete: true, - short_channel_ids: vec![] - }] + short_channel_ids: vec![], + }], ); // Error when wrong chain @@ -3419,7 +3961,7 @@ pub(crate) mod tests { number_of_blocks: 0xffff_ffff, sync_complete: true, short_channel_ids: vec![], - }] + }], ); // Error when first_blocknum > 0xffffff @@ -3437,8 +3979,8 @@ pub(crate) mod tests { first_blocknum: 0x01000000, number_of_blocks: 0xffff_ffff, sync_complete: true, - short_channel_ids: vec![] - }] + short_channel_ids: vec![], + }], ); // Empty reply when max valid SCID block num @@ -3451,15 +3993,13 @@ pub(crate) mod tests { number_of_blocks: 1, }, true, - vec![ - ReplyChannelRange { - chain_hash: chain_hash.clone(), - first_blocknum: 0xffffff, - number_of_blocks: 1, - sync_complete: true, - short_channel_ids: vec![] - }, - ] + vec![ReplyChannelRange { + chain_hash: chain_hash.clone(), + first_blocknum: 0xffffff, + number_of_blocks: 1, + sync_complete: true, + short_channel_ids: vec![], + }], ); // No results in valid query range @@ -3472,15 +4012,13 @@ pub(crate) mod tests { number_of_blocks: 1000, }, true, - vec![ - ReplyChannelRange { - chain_hash: chain_hash.clone(), - first_blocknum: 1000, - number_of_blocks: 1000, - sync_complete: true, - 
short_channel_ids: vec![],
-			}
-		]
+		vec![ReplyChannelRange {
+			chain_hash: chain_hash.clone(),
+			first_blocknum: 1000,
+			number_of_blocks: 1000,
+			sync_complete: true,
+			short_channel_ids: vec![],
+		}],
 	);
 
 	// Overflow first_blocknum + number_of_blocks
@@ -3493,17 +4031,15 @@ pub(crate) mod tests {
 			number_of_blocks: 0xffffffff,
 		},
 		true,
-		vec![
-			ReplyChannelRange {
-				chain_hash: chain_hash.clone(),
-				first_blocknum: 0xfe0000,
-				number_of_blocks: 0xffffffff - 0xfe0000,
-				sync_complete: true,
-				short_channel_ids: vec![
-					0xfffffe_ffffff_ffff, // max
-				]
-			}
-		]
+		vec![ReplyChannelRange {
+			chain_hash: chain_hash.clone(),
+			first_blocknum: 0xfe0000,
+			number_of_blocks: 0xffffffff - 0xfe0000,
+			sync_complete: true,
+			short_channel_ids: vec![
+				0xfffffe_ffffff_ffff, // max
+			],
+		}],
 	);
 
 	// Single block exactly full
@@ -3516,17 +4052,15 @@ pub(crate) mod tests {
 			number_of_blocks: 8000,
 		},
 		true,
-		vec![
-			ReplyChannelRange {
-				chain_hash: chain_hash.clone(),
-				first_blocknum: 100000,
-				number_of_blocks: 8000,
-				sync_complete: true,
-				short_channel_ids: (100000..=107999)
-					.map(|block| scid_from_parts(block, 0, 0).unwrap())
-					.collect(),
-			},
-		]
+		vec![ReplyChannelRange {
+			chain_hash: chain_hash.clone(),
+			first_blocknum: 100000,
+			number_of_blocks: 8000,
+			sync_complete: true,
+			short_channel_ids: (100000..=107999)
+				.map(|block| scid_from_parts(block, 0, 0).unwrap())
+				.collect(),
+		}],
 	);
 
 	// Multiple split on new block
@@ -3554,11 +4088,9 @@ pub(crate) mod tests {
 				first_blocknum: 107999,
 				number_of_blocks: 2,
 				sync_complete: true,
-				short_channel_ids: vec![
-					scid_from_parts(108000, 0, 0).unwrap(),
-				],
-			}
-		]
+				short_channel_ids: vec![scid_from_parts(108000, 0, 0).unwrap()],
+			},
+		],
 	);
 
 	// Multiple split on same block
@@ -3586,20 +4118,20 @@ pub(crate) mod tests {
 				first_blocknum: 108001,
 				number_of_blocks: 1,
 				sync_complete: true,
-				short_channel_ids: vec![
-					scid_from_parts(108001, 1, 0).unwrap(),
-				],
-			}
-		]
+				short_channel_ids: vec![scid_from_parts(108001, 1, 0).unwrap()],
+			},
+		],
 	);
 }
 
 fn do_handling_query_channel_range(
-	gossip_sync: &P2PGossipSync<&NetworkGraph<Arc<test_utils::TestLogger>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
-	test_node_id: &PublicKey,
-	msg: QueryChannelRange,
-	expected_ok: bool,
-	expected_replies: Vec<ReplyChannelRange>
+	gossip_sync: &P2PGossipSync<
+		&NetworkGraph<Arc<test_utils::TestLogger>>,
+		Arc<test_utils::TestChainSource>,
+		Arc<test_utils::TestLogger>,
+	>,
+	test_node_id: &PublicKey, msg: QueryChannelRange, expected_ok: bool,
+	expected_replies: Vec<ReplyChannelRange>,
 ) {
 	let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
 	let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
@@ -3627,14 +4159,22 @@ pub(crate) mod tests {
 				assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
 
 				// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
-				assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+				assert!(
+					msg.first_blocknum == c_lightning_0_9_prev_end_blocknum
+						|| msg.first_blocknum
+							== c_lightning_0_9_prev_end_blocknum.saturating_add(1)
+				);
 				assert!(msg.first_blocknum >= max_firstblocknum);
 				max_firstblocknum = msg.first_blocknum;
-				c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+				c_lightning_0_9_prev_end_blocknum =
+					msg.first_blocknum.saturating_add(msg.number_of_blocks);
 
 				// Check that the last block count is >= the query's end_blocknum
 				if i == events.len() - 1 {
-					assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+					assert!(
+						msg.first_blocknum.saturating_add(msg.number_of_blocks)
+							>= query_end_blocknum
+					);
 				}
 			},
 			_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
@@ -3651,10 +4191,10 @@ pub(crate) mod tests {
 
 	let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
 
-	let result = gossip_sync.handle_query_short_channel_ids(node_id, QueryShortChannelIds {
-		chain_hash,
-		short_channel_ids: vec![0x0003e8_000000_0000],
-	});
+	let result = gossip_sync.handle_query_short_channel_ids(
+		node_id,
+		QueryShortChannelIds { chain_hash, short_channel_ids: vec![0x0003e8_000000_0000] },
+	);
 	assert!(result.is_err());
 }
 
@@ -3685,7 +4225,11 @@ pub(crate) mod tests {
 	fn channel_info_is_readable() {
 		let chanmon_cfgs = crate::ln::functional_test_utils::create_chanmon_cfgs(2);
 		let node_cfgs = crate::ln::functional_test_utils::create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = crate::ln::functional_test_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None, None, None]);
+		let node_chanmgrs = crate::ln::functional_test_utils::create_node_chanmgrs(
+			2,
+			&node_cfgs,
+			&[None, None, None, None],
+		);
 		let nodes = crate::ln::functional_test_utils::create_network(2, &node_cfgs, &node_chanmgrs);
 		let config = crate::ln::functional_test_utils::test_default_channel_config();
 
@@ -3704,7 +4248,8 @@ pub(crate) mod tests {
 		assert!(chan_update_info.write(&mut encoded_chan_update_info).is_ok());
 
 		// First make sure we can read ChannelUpdateInfos we just wrote
-		let read_chan_update_info: ChannelUpdateInfo = crate::util::ser::Readable::read(&mut encoded_chan_update_info.as_slice()).unwrap();
+		let read_chan_update_info: ChannelUpdateInfo =
+			crate::util::ser::Readable::read(&mut encoded_chan_update_info.as_slice()).unwrap();
		assert_eq!(chan_update_info, read_chan_update_info);

		// Check the serialization hasn't changed.
@@ -3714,11 +4259,15 @@ pub(crate) mod tests {
		// Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself
		// or the ChannelUpdate enclosed with `last_update_message`.
		let legacy_chan_update_info_with_some_and_fail_update: Vec<u8> = <Vec<u8>>::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap();
-		let read_chan_update_info_res: Result<ChannelUpdateInfo, DecodeError> = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_some_and_fail_update.as_slice());
+		let read_chan_update_info_res: Result<ChannelUpdateInfo, DecodeError> =
+			crate::util::ser::Readable::read(
+				&mut legacy_chan_update_info_with_some_and_fail_update.as_slice(),
+			);
		assert!(read_chan_update_info_res.is_err());

		let legacy_chan_update_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
-		let read_chan_update_info_res: Result<ChannelUpdateInfo, DecodeError> = crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
+		let read_chan_update_info_res: Result<ChannelUpdateInfo, DecodeError> =
+			crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
		assert!(read_chan_update_info_res.is_err());

		// 2. Test encoding/decoding of ChannelInfo
@@ -3739,7 +4288,8 @@ pub(crate) mod tests {
		let mut encoded_chan_info: Vec<u8> = Vec::new();
		assert!(chan_info_none_updates.write(&mut encoded_chan_info).is_ok());

-		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
+		let read_chan_info: ChannelInfo =
+			crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
		assert_eq!(chan_info_none_updates, read_chan_info);

		// Check we can encode/decode ChannelInfo with ChannelUpdateInfo fields present.
@@ -3759,7 +4309,8 @@ pub(crate) mod tests {
		let mut encoded_chan_info: Vec<u8> = Vec::new();
		assert!(chan_info_some_updates.write(&mut encoded_chan_info).is_ok());

-		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
+		let read_chan_info: ChannelInfo =
+			crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
		assert_eq!(chan_info_some_updates, read_chan_info);

		// Check the serialization hasn't changed.
@@ -3769,13 +4320,17 @@ pub(crate) mod tests {
		// Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` /
		// `last_update_message` fields fail to decode due to missing htlc_maximum_msat.
		let legacy_chan_info_with_some_and_fail_update = <Vec<u8>>::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
-		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_some_and_fail_update.as_slice()).unwrap();
+		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(
+			&mut legacy_chan_info_with_some_and_fail_update.as_slice(),
+		)
+		.unwrap();
		assert_eq!(read_chan_info.announcement_received_time, 87654);
		assert_eq!(read_chan_info.one_to_two, None);
		assert_eq!(read_chan_info.two_to_one, None);

		let legacy_chan_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
-		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
+		let read_chan_info: ChannelInfo =
+			crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
		assert_eq!(read_chan_info.announcement_received_time, 87654);
		assert_eq!(read_chan_info.one_to_two, None);
		assert_eq!(read_chan_info.two_to_one, None);
@@ -3785,17 +4340,20 @@ pub(crate) mod tests {
	fn node_info_is_readable() {
		// 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one
		let announcement_message = <Vec<u8>>::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap();
-		let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap();
+		let announcement_message =
+			NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap();
		let valid_node_ann_info = NodeAnnouncementInfo::Relayed(announcement_message);
		let mut encoded_valid_node_ann_info = Vec::new();
		assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
-		let read_valid_node_ann_info = NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
+		let read_valid_node_ann_info =
+			NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
		assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
		assert_eq!(read_valid_node_ann_info.addresses().len(), 1);

		let encoded_invalid_node_ann_info = <Vec<u8>>::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
-		let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
+		let read_invalid_node_ann_info_res =
+			NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
		assert!(read_invalid_node_ann_info_res.is_err());

		// 2. Check we can read a NodeInfo anyways, but set the NodeAnnouncementInfo to None if invalid
@@ -3811,14 +4369,16 @@ pub(crate) mod tests {
		assert_eq!(read_valid_node_info, valid_node_info);

		let encoded_invalid_node_info_hex = <Vec<u8>>::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
-		let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
+		let read_invalid_node_info =
+			NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
		assert_eq!(read_invalid_node_info.announcement_info, None);
	}

	#[test]
	fn test_node_info_keeps_compatibility() {
		let old_ann_info_with_addresses = <Vec<u8>>::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap();
-		let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
+		let ann_info_with_addresses =
+			NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
			.expect("to be able to read an old NodeAnnouncementInfo with addresses");
		// This serialized info has no announcement_message but its address field should still be considered
		assert!(!ann_info_with_addresses.addresses().is_empty());
@@ -3827,7 +4387,10 @@ pub(crate) mod tests {
	#[test]
	fn test_node_id_display() {
		let node_id = NodeId([42; 33]);
-		assert_eq!(format!("{}", &node_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
+		assert_eq!(
+			format!("{}", &node_id),
+			"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"
+		);
	}

	#[test]
@@ -3840,23 +4403,25 @@ pub(crate) mod tests {
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_1_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));

-		let announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
+		let announcement =
+			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
		gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &announcement).unwrap();

-		let tcp_ip_v4 = SocketAddress::TcpIpV4 {
-			addr: [255, 254, 253, 252],
-			port: 9735
-		};
+		let tcp_ip_v4 = SocketAddress::TcpIpV4 { addr: [255, 254, 253, 252], port: 9735 };
		let tcp_ip_v6 = SocketAddress::TcpIpV6 {
			addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
-			port: 9735
+			port: 9735,
		};
-		let onion_v2 = SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
+		let onion_v2 =
+			SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
		let onion_v3 = SocketAddress::OnionV3 {
-			ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224],
+			ed25519_pubkey: [
+				255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240,
+				239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224,
+			],
			checksum: 32,
			version: 16,
-			port: 9735
+			port: 9735,
		};
		let hostname = SocketAddress::Hostname {
			hostname: Hostname::try_from(String::from("host")).unwrap(),
@@ -3872,36 +4437,40 @@ pub(crate) mod tests {
		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![
-					tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone(),
-					hostname.clone()
+					tcp_ip_v4.clone(),
+					tcp_ip_v6.clone(),
+					onion_v2.clone(),
+					onion_v3.clone(),
+					hostname.clone(),
				];
				announcement.timestamp += 1000;
			},
-			node_1_privkey, &secp_ctx
+			node_1_privkey,
+			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
-				announcement.addresses = vec![
-					tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()
-				];
+				announcement.addresses =
+					vec![tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 2000;
			},
-			node_1_privkey, &secp_ctx
+			node_1_privkey,
+			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
-				announcement.addresses = vec![
-					tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()
-				];
+				announcement.addresses =
+					vec![tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 3000;
			},
-			node_1_privkey, &secp_ctx
+			node_1_privkey,
+			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

@@ -3911,7 +4480,8 @@ pub(crate) mod tests {
				announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 4000;
			},
-			node_1_privkey, &secp_ctx
+			node_1_privkey,
+			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

@@ -3921,7 +4491,8 @@ pub(crate) mod tests {
				announcement.addresses = vec![onion_v2.clone()];
				announcement.timestamp += 5000;
			},
-			node_1_privkey, &secp_ctx
+			node_1_privkey,
+			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

@@ -3931,7 +4502,8 @@ pub(crate) mod tests {
				announcement.addresses = vec![tcp_ip_v4.clone()];
				announcement.timestamp += 6000;
			},
-			node_1_privkey, &secp_ctx
+			node_1_privkey,
+			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
@@ -3941,17 +4513,19 @@ pub(crate) mod tests {
	}
}

#[cfg(ldk_bench)]
pub mod benches {
	use super::*;
-	use std::io::Read;
	use criterion::{black_box, Criterion};
+	use std::io::Read;

	pub fn read_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut v = Vec::new();
		d.read_to_end(&mut v).unwrap();
-		bench.bench_function("read_network_graph", |b| b.iter(||
-			NetworkGraph::read(&mut crate::io::Cursor::new(black_box(&v)), &logger).unwrap()
-		));
+		bench.bench_function("read_network_graph", |b| {
+			b.iter(|| {
+				NetworkGraph::read(&mut crate::io::Cursor::new(black_box(&v)), &logger).unwrap()
+			})
+		});
	}

	pub fn write_network_graph(bench: &mut Criterion) {
@@ -3960,8 +4534,6 @@ pub mod benches {
		let mut graph_buffer = Vec::new();
		d.read_to_end(&mut graph_buffer).unwrap();
		let net_graph = NetworkGraph::read(&mut &graph_buffer[..], &logger).unwrap();
-		bench.bench_function("write_network_graph", |b| b.iter(||
-			black_box(&net_graph).encode()
-		));
+		bench.bench_function("write_network_graph", |b| b.iter(|| black_box(&net_graph).encode()));
	}
}
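
A note on the short_channel_id constants used throughout the tests above (0x0003e8_000000_0000, the scid_from_parts(block, 0, 0) calls, and the 0xfffffe_ffffff_ffff "max" vector): per BOLT 7, a short channel ID packs a funding output's on-chain position into a single u64, with the most significant three bytes holding the block height, the next three bytes the transaction index within that block, and the low two bytes the output index. Because the block height occupies the top bytes, SCIDs sort block-first, which is what lets do_handling_query_channel_range require each reply batch to split cleanly on block boundaries. The sketch below illustrates that layout only; pack_scid and unpack_scid are illustrative names for this note, not the crate's scid_from_parts helper:

// BOLT 7 short_channel_id layout, most significant byte first:
// [ 3 bytes block height | 3 bytes tx index in block | 2 bytes output index ]
fn pack_scid(block: u64, tx_index: u64, vout: u64) -> Option<u64> {
	// Reject components that do not fit their field widths.
	if block > 0x00ff_ffff || tx_index > 0x00ff_ffff || vout > 0xffff {
		return None;
	}
	Some((block << 40) | (tx_index << 16) | vout)
}

fn unpack_scid(scid: u64) -> (u64, u64, u64) {
	(scid >> 40, (scid >> 16) & 0x00ff_ffff, scid & 0xffff)
}

fn main() {
	// 0x0003e8_000000_0000 from the query_short_channel_ids test is simply
	// "block 1000, first transaction, first output".
	assert_eq!(pack_scid(1000, 0, 0), Some(0x0003e8_000000_0000));
	// Round-trips, mirroring the scid_from_parts(108000, 0, 0) vectors above.
	assert_eq!(unpack_scid(pack_scid(108_000, 0, 0).unwrap()), (108_000, 0, 0));
	// The "max" vector decodes to block 0xfffffe with maxed-out tx and output indices.
	assert_eq!(unpack_scid(0xfffffe_ffffff_ffff), (0xfffffe, 0xffffff, 0xffff));
}

Nothing here is specific to rust-lightning; it is just the BOLT 7 bit layout that the test vectors encode.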