X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;ds=sidebyside;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=39c2894b32c429b5115f4eee66c427bed21989b1;hb=f53d13bcb8220b3ce39e51a4d20beb23b3930d1f;hp=bdb305e7870a2b9a30e9236c34b516dc2328c873;hpb=2352587811afd37034ff23aba4de98d5efa4e220;p=rust-lightning diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index bdb305e7..39c2894b 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -9,6 +9,7 @@ //! The top-level network map tracking logic lives here. +use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; use bitcoin::secp256k1::key::PublicKey; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1; @@ -24,7 +25,7 @@ use chain; use chain::Access; use ln::features::{ChannelFeatures, NodeFeatures}; use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; -use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField}; +use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField, GossipTimestampFilter}; use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; @@ -42,6 +43,13 @@ use sync::Mutex; use core::ops::Deref; use bitcoin::hashes::hex::ToHex; +#[cfg(feature = "std")] +use std::time::{SystemTime, UNIX_EPOCH}; + +/// We remove stale channel directional info two weeks after the last update, per BOLT 7's +/// suggestion. +const STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14; + /// The maximum number of extra bytes which we do not understand in a gossip message before we will /// refuse to relay the message. const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024; @@ -50,12 +58,75 @@ const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024; /// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations. const MAX_SCIDS_PER_REPLY: usize = 8000; +/// Represents the compressed public key of a node +#[derive(Clone, Copy)] +pub struct NodeId([u8; PUBLIC_KEY_SIZE]); + +impl NodeId { + /// Create a new NodeId from a public key + pub fn from_pubkey(pubkey: &PublicKey) -> Self { + NodeId(pubkey.serialize()) + } + + /// Get the public key slice from this NodeId + pub fn as_slice(&self) -> &[u8] { + &self.0 + } +} + +impl fmt::Debug for NodeId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "NodeId({})", log_bytes!(self.0)) + } +} + +impl core::hash::Hash for NodeId { + fn hash(&self, hasher: &mut H) { + self.0.hash(hasher); + } +} + +impl Eq for NodeId {} + +impl PartialEq for NodeId { + fn eq(&self, other: &Self) -> bool { + self.0[..] == other.0[..] 
+ } +} + +impl cmp::PartialOrd for NodeId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for NodeId { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0[..].cmp(&other.0[..]) + } +} + +impl Writeable for NodeId { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + writer.write_all(&self.0)?; + Ok(()) + } +} + +impl Readable for NodeId { + fn read(reader: &mut R) -> Result { + let mut buf = [0; PUBLIC_KEY_SIZE]; + reader.read_exact(&mut buf)?; + Ok(Self(buf)) + } +} + /// Represents the network as nodes and channels between them pub struct NetworkGraph { genesis_hash: BlockHash, // Lock order: channels -> nodes channels: RwLock>, - nodes: RwLock>, + nodes: RwLock>, } impl Clone for NetworkGraph { @@ -73,7 +144,7 @@ impl Clone for NetworkGraph { /// A read-only view of [`NetworkGraph`]. pub struct ReadOnlyNetworkGraph<'a> { channels: RwLockReadGuard<'a, BTreeMap>, - nodes: RwLockReadGuard<'a, BTreeMap>, + nodes: RwLockReadGuard<'a, BTreeMap>, } /// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion @@ -122,7 +193,7 @@ impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate, }, ); -impl EventHandler for NetGraphMsgHandler +impl, C: Deref, L: Deref> EventHandler for NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { fn handle_event(&self, event: &Event) { if let Event::PaymentPathFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event { @@ -141,19 +212,18 @@ where C::Target: chain::Access, L::Target: Logger { /// /// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentPathFailed`] to the /// [`NetworkGraph`]. -pub struct NetGraphMsgHandler +pub struct NetGraphMsgHandler, C: Deref, L: Deref> where C::Target: chain::Access, L::Target: Logger { secp_ctx: Secp256k1, - /// Representation of the payment channel network - pub network_graph: NetworkGraph, + network_graph: G, chain_access: Option, full_syncs_requested: AtomicUsize, pending_events: Mutex>, logger: L, } -impl NetGraphMsgHandler +impl, C: Deref, L: Deref> NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { /// Creates a new tracker of the actual state of the network of channels and nodes, @@ -161,7 +231,7 @@ where C::Target: chain::Access, L::Target: Logger /// Chain monitor is used to make sure announced channels exist on-chain, /// channel data is correct, and that the announcement is signed with /// channel owners' keys. - pub fn new(network_graph: NetworkGraph, chain_access: Option, logger: L) -> Self { + pub fn new(network_graph: G, chain_access: Option, logger: L) -> Self { NetGraphMsgHandler { secp_ctx: Secp256k1::verification_only(), network_graph, @@ -179,6 +249,14 @@ where C::Target: chain::Access, L::Target: Logger self.chain_access = chain_access; } + /// Gets a reference to the underlying [`NetworkGraph`] which was provided in + /// [`NetGraphMsgHandler::new`]. + /// + /// (C-not exported) as bindings don't support a reference-to-a-reference yet + pub fn network_graph(&self) -> &G { + &self.network_graph + } + /// Returns true when a full routing table sync should be performed with a peer. fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { //TODO: Determine whether to request a full sync based on the network map. @@ -216,15 +294,26 @@ where C::Target: chain::Access, L::Target: Logger } macro_rules! 
secp_verify_sig { - ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => { + ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr, $msg_type: expr ) => { match $secp_ctx.verify($msg, $sig, $pubkey) { Ok(_) => {}, - Err(_) => return Err(LightningError{err: "Invalid signature from remote node".to_owned(), action: ErrorAction::IgnoreError}), + Err(_) => { + return Err(LightningError { + err: format!("Invalid signature on {} message", $msg_type), + action: ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id: [0; 32], + data: format!("Invalid signature on {} message", $msg_type), + }, + log_level: Level::Trace, + }, + }); + }, } }; } -impl RoutingMessageHandler for NetGraphMsgHandler +impl, C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result { @@ -236,7 +325,7 @@ where C::Target: chain::Access, L::Target: Logger fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { self.network_graph.update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?; - log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }); + log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }); Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } @@ -277,11 +366,11 @@ where C::Target: chain::Access, L::Target: Logger let mut result = Vec::with_capacity(batch_amount as usize); let nodes = self.network_graph.nodes.read().unwrap(); let mut iter = if let Some(pubkey) = starting_point { - let mut iter = nodes.range((*pubkey)..); + let mut iter = nodes.range(NodeId::from_pubkey(pubkey)..); iter.next(); iter } else { - nodes.range(..) + nodes.range::(..) }; while result.len() < batch_amount as usize { if let Some((_, ref node)) = iter.next() { @@ -306,74 +395,97 @@ where C::Target: chain::Access, L::Target: Logger /// to request gossip messages for each channel. The sync is considered complete /// when the final reply_scids_end message is received, though we are not /// tracking this directly. - fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) { - + fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) { // We will only perform a sync with peers that support gossip_queries. if !init_msg.features.supports_gossip_queries() { return (); } - // Check if we need to perform a full synchronization with this peer - if !self.should_request_full_sync(their_node_id) { - return (); + // The lightning network's gossip sync system is completely broken in numerous ways. + // + // Given no broadly-available set-reconciliation protocol, the only reasonable approach is + // to do a full sync from the first few peers we connect to, and then receive gossip + // updates from all our peers normally. + // + // Originally, we could simply tell a peer to dump us the entire gossip table on startup, + // wasting lots of bandwidth but ensuring we have the full network graph. After the initial + // dump peers would always send gossip and we'd stay up-to-date with whatever our peer has + // seen. 
+ // + // In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you + // to ask for the SCIDs of all channels in your peer's routing graph, and then only request + // channel data which you are missing. Except there was no way at all to identify which + // `channel_update`s you were missing, so you still had to request everything, just in a + // very complicated way with some queries instead of just getting the dump. + // + // Later, an option was added to fetch the latest timestamps of the `channel_update`s to + // make efficient sync possible, however it has yet to be implemented in lnd, which makes + // relying on it useless. + // + // After gossip queries were introduced, support for receiving a full gossip table dump on + // connection was removed from several nodes, making it impossible to get a full sync + // without using the "gossip queries" messages. + // + // Once you opt into "gossip queries" the only way to receive any gossip updates that a + // peer receives after you connect, you must send a `gossip_timestamp_filter` message. This + // message, as the name implies, tells the peer to not forward any gossip messages with a + // timestamp older than a given value (not the time the peer received the filter, but the + // timestamp in the update message, which is often hours behind when the peer received the + // message). + // + // Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but its also a request for + // your peer to send you the full routing graph (subject to the filter). Thus, in order to + // tell a peer to send you any updates as it sees them, you have to also ask for the full + // routing graph to be synced. If you set a timestamp filter near the current time, peers + // will simply not forward any new updates they see to you which were generated some time + // ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks + // ago), you will always get the full routing graph from all your peers. + // + // Most lightning nodes today opt to simply turn off receiving gossip data which only + // propagated some time after it was generated, and, worse, often disable gossiping with + // several peers after their first connection. The second behavior can cause gossip to not + // propagate fully if there are cuts in the gossiping subgraph. + // + // In an attempt to cut a middle ground between always fetching the full graph from all of + // our peers and never receiving gossip from peers at all, we send all of our peers a + // `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago. + // + // For no-std builds, we bury our head in the sand and do a full sync on each connection. 
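	// Illustrative sketch (not part of this diff): the relay rule a peer is expected to
	// apply once it has received our `gossip_timestamp_filter`, assuming the BOLT 7
	// semantics that gossip is forwarded when its *embedded* timestamp falls within
	// [first_timestamp, first_timestamp + timestamp_range) -- i.e. the comparison is
	// against the timestamp inside each gossip message, not the time it was received.
	fn passes_gossip_timestamp_filter(update_timestamp: u32, first_timestamp: u32, timestamp_range: u32) -> bool {
		update_timestamp >= first_timestamp
			&& (update_timestamp as u64) < first_timestamp as u64 + timestamp_range as u64
	}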
+ let should_request_full_sync = self.should_request_full_sync(&their_node_id); + #[allow(unused_mut, unused_assignments)] + let mut gossip_start_time = 0; + #[cfg(feature = "std")] + { + gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + if should_request_full_sync { + gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago + } else { + gossip_start_time -= 60 * 60; // an hour ago + } } - let first_blocknum = 0; - let number_of_blocks = 0xffffffff; - log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks); let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(MessageSendEvent::SendChannelRangeQuery { + pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), - msg: QueryChannelRange { + msg: GossipTimestampFilter { chain_hash: self.network_graph.genesis_hash, - first_blocknum, - number_of_blocks, + first_timestamp: gossip_start_time as u32, // 2106 issue! + timestamp_range: u32::max_value(), }, }); } - /// Statelessly processes a reply to a channel range query by immediately - /// sending an SCID query with SCIDs in the reply. To keep this handler - /// stateless, it does not validate the sequencing of replies for multi- - /// reply ranges. It does not validate whether the reply(ies) cover the - /// queried range. It also does not filter SCIDs to only those in the - /// original query range. We also do not validate that the chain_hash - /// matches the chain_hash of the NetworkGraph. Any chan_ann message that - /// does not match our chain_hash will be rejected when the announcement is - /// processed. - fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> { - log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),); - - log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len()); - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(MessageSendEvent::SendShortIdsQuery { - node_id: their_node_id.clone(), - msg: QueryShortChannelIds { - chain_hash: msg.chain_hash, - short_channel_ids: msg.short_channel_ids, - } - }); - + fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { + // We don't make queries, so should never receive replies. If, in the future, the set + // reconciliation extensions to gossip queries become broadly supported, we should revert + // this code to its state pre-0.0.106. Ok(()) } - /// When an SCID query is initiated the remote peer will begin streaming - /// gossip messages. In the event of a failure, we may have received - /// some channel information. Before trying with another peer, the - /// caller should update its set of SCIDs that need to be queried. 
- fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { - log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information); - - // If the remote node does not have up-to-date information for the - // chain_hash they will set full_information=false. We can fail - // the result and try again with a different peer. - if !msg.full_information { - return Err(LightningError { - err: String::from("Received reply_short_channel_ids_end with no information"), - action: ErrorAction::IgnoreError - }); - } - + fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { + // We don't make queries, so should never receive replies. If, in the future, the set + // reconciliation extensions to gossip queries become broadly supported, we should revert + // this code to its state pre-0.0.106. Ok(()) } @@ -490,7 +602,7 @@ where C::Target: chain::Access, L::Target: Logger } } -impl MessageSendEventsProvider for NetGraphMsgHandler +impl, C: Deref, L: Deref> MessageSendEventsProvider for NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger, @@ -504,9 +616,8 @@ where } #[derive(Clone, Debug, PartialEq)] -/// Details about one direction of a channel. Received -/// within a channel update. -pub struct DirectionalChannelInfo { +/// Details about one direction of a channel as received within a [`ChannelUpdate`]. +pub struct ChannelUpdateInfo { /// When the last update to the channel direction was issued. /// Value is opaque, as set in the announcement. pub last_update: u32, @@ -527,14 +638,14 @@ pub struct DirectionalChannelInfo { pub last_update_message: Option, } -impl fmt::Display for DirectionalChannelInfo { +impl fmt::Display for ChannelUpdateInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}", self.last_update, self.enabled, self.cltv_expiry_delta, self.htlc_minimum_msat, self.fees)?; Ok(()) } } -impl_writeable_tlv_based!(DirectionalChannelInfo, { +impl_writeable_tlv_based!(ChannelUpdateInfo, { (0, last_update, required), (2, enabled, required), (4, cltv_expiry_delta, required), @@ -551,13 +662,13 @@ pub struct ChannelInfo { /// Protocol features of a channel communicated during its announcement pub features: ChannelFeatures, /// Source node of the first direction of a channel - pub node_one: PublicKey, + pub node_one: NodeId, /// Details about the first direction of a channel - pub one_to_two: Option, + pub one_to_two: Option, /// Source node of the second direction of a channel - pub node_two: PublicKey, + pub node_two: NodeId, /// Details about the second direction of a channel - pub two_to_one: Option, + pub two_to_one: Option, /// The channel capacity as seen on-chain, if chain lookup is available. pub capacity_sats: Option, /// An initial announcement of the channel @@ -565,18 +676,55 @@ pub struct ChannelInfo { /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. pub announcement_message: Option, + /// The timestamp when we received the announcement, if we are running with feature = "std" + /// (which we can probably assume we are - no-std environments probably won't have a full + /// network graph in memory!). 
+ announcement_received_time: u64, +} + +impl ChannelInfo { + /// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a + /// returned `source`, or `None` if `target` is not one of the channel's counterparties. + pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { + let (direction, source) = { + if target == &self.node_one { + (self.two_to_one.as_ref(), &self.node_two) + } else if target == &self.node_two { + (self.one_to_two.as_ref(), &self.node_one) + } else { + return None; + } + }; + Some((DirectedChannelInfo { channel: self, direction }, source)) + } + + /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a + /// returned `target`, or `None` if `source` is not one of the channel's counterparties. + pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { + let (direction, target) = { + if source == &self.node_one { + (self.one_to_two.as_ref(), &self.node_two) + } else if source == &self.node_two { + (self.two_to_one.as_ref(), &self.node_one) + } else { + return None; + } + }; + Some((DirectedChannelInfo { channel: self, direction }, target)) + } } impl fmt::Display for ChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}", - log_bytes!(self.features.encode()), log_pubkey!(self.node_one), self.one_to_two, log_pubkey!(self.node_two), self.two_to_one)?; + log_bytes!(self.features.encode()), log_bytes!(self.node_one.as_slice()), self.one_to_two, log_bytes!(self.node_two.as_slice()), self.two_to_one)?; Ok(()) } } impl_writeable_tlv_based!(ChannelInfo, { (0, features, required), + (1, announcement_received_time, (default_value, 0)), (2, node_one, required), (4, one_to_two, required), (6, node_two, required), @@ -585,6 +733,132 @@ impl_writeable_tlv_based!(ChannelInfo, { (12, announcement_message, required), }); +/// A wrapper around [`ChannelInfo`] representing information about the channel as directed from a +/// source node to a target node. +#[derive(Clone)] +pub struct DirectedChannelInfo<'a> { + channel: &'a ChannelInfo, + direction: Option<&'a ChannelUpdateInfo>, +} + +impl<'a> DirectedChannelInfo<'a> { + /// Returns information for the channel. + pub fn channel(&self) -> &'a ChannelInfo { self.channel } + + /// Returns information for the direction. + pub fn direction(&self) -> Option<&'a ChannelUpdateInfo> { self.direction } + + /// Returns the [`EffectiveCapacity`] of the channel in the direction. + /// + /// This is either the total capacity from the funding transaction, if known, or the + /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known, + /// whichever is smaller. + pub fn effective_capacity(&self) -> EffectiveCapacity { + let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); + self.direction + .and_then(|direction| direction.htlc_maximum_msat) + .map(|max_htlc_msat| { + let capacity_msat = capacity_msat.unwrap_or(u64::max_value()); + if max_htlc_msat < capacity_msat { + EffectiveCapacity::MaximumHTLC { amount_msat: max_htlc_msat } + } else { + EffectiveCapacity::Total { capacity_msat } + } + }) + .or_else(|| capacity_msat.map(|capacity_msat| + EffectiveCapacity::Total { capacity_msat })) + .unwrap_or(EffectiveCapacity::Unknown) + } + + /// Returns `Some` if [`ChannelUpdateInfo`] is available in the direction. 
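	// Illustrative usage sketch (not part of this diff; the helper name is an assumption):
	// direct a `ChannelInfo` from one of its counterparties and read how many msats the
	// gossip data suggests can be routed in that direction.
	fn routable_msat_from(chan: &ChannelInfo, source: &NodeId) -> Option<u64> {
		chan.as_directed_from(source)
			.map(|(directed, _target)| directed.effective_capacity().as_msat())
	}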
+ pub(super) fn with_update(self) -> Option> { + match self.direction { + Some(_) => Some(DirectedChannelInfoWithUpdate { inner: self }), + None => None, + } + } +} + +impl<'a> fmt::Debug for DirectedChannelInfo<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("DirectedChannelInfo") + .field("channel", &self.channel) + .finish() + } +} + +/// A [`DirectedChannelInfo`] with [`ChannelUpdateInfo`] available in its direction. +#[derive(Clone)] +pub(super) struct DirectedChannelInfoWithUpdate<'a> { + inner: DirectedChannelInfo<'a>, +} + +impl<'a> DirectedChannelInfoWithUpdate<'a> { + /// Returns information for the channel. + #[inline] + pub(super) fn channel(&self) -> &'a ChannelInfo { &self.inner.channel } + + /// Returns information for the direction. + #[inline] + pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.inner.direction.unwrap() } + + /// Returns the [`EffectiveCapacity`] of the channel in the direction. + #[inline] + pub(super) fn effective_capacity(&self) -> EffectiveCapacity { self.inner.effective_capacity() } +} + +impl<'a> fmt::Debug for DirectedChannelInfoWithUpdate<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + self.inner.fmt(f) + } +} + +/// The effective capacity of a channel for routing purposes. +/// +/// While this may be smaller than the actual channel capacity, amounts greater than +/// [`Self::as_msat`] should not be routed through the channel. +pub enum EffectiveCapacity { + /// The available liquidity in the channel known from being a channel counterparty, and thus a + /// direct hop. + ExactLiquidity { + /// Either the inbound or outbound liquidity depending on the direction, denominated in + /// millisatoshi. + liquidity_msat: u64, + }, + /// The maximum HTLC amount in one direction as advertised on the gossip network. + MaximumHTLC { + /// The maximum HTLC amount denominated in millisatoshi. + amount_msat: u64, + }, + /// The total capacity of the channel as determined by the funding transaction. + Total { + /// The funding amount denominated in millisatoshi. + capacity_msat: u64, + }, + /// A capacity sufficient to route any payment, typically used for private channels provided by + /// an invoice. + Infinite, + /// A capacity that is unknown possibly because either the chain state is unavailable to know + /// the total capacity or the `htlc_maximum_msat` was not advertised on the gossip network. + Unknown, +} + +/// The presumed channel capacity denominated in millisatoshi for [`EffectiveCapacity::Unknown`] to +/// use when making routing decisions. +pub const UNKNOWN_CHANNEL_CAPACITY_MSAT: u64 = 250_000 * 1000; + +impl EffectiveCapacity { + /// Returns the effective capacity denominated in millisatoshi. 
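	// Illustrative values for the capacity rules above (not part of this diff):
	fn effective_capacity_examples() {
		// No funding value and no advertised htlc_maximum_msat: fall back to the presumed capacity.
		assert_eq!(EffectiveCapacity::Unknown.as_msat(), UNKNOWN_CHANNEL_CAPACITY_MSAT);
		// An advertised htlc_maximum_msat below the funding amount caps the direction.
		assert_eq!(EffectiveCapacity::MaximumHTLC { amount_msat: 400_000_000 }.as_msat(), 400_000_000);
		// Otherwise the full on-chain capacity, expressed in msat, is used.
		assert_eq!(EffectiveCapacity::Total { capacity_msat: 1_000_000_000 }.as_msat(), 1_000_000_000);
	}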
+ pub fn as_msat(&self) -> u64 { + match self { + EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat, + EffectiveCapacity::MaximumHTLC { amount_msat } => *amount_msat, + EffectiveCapacity::Total { capacity_msat } => *capacity_msat, + EffectiveCapacity::Infinite => u64::max_value(), + EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT, + } + } +} /// Fees for routing via a given channel or a node #[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] @@ -724,8 +998,8 @@ impl fmt::Display for NetworkGraph { writeln!(f, " {}: {}", key, val)?; } writeln!(f, "[Nodes]")?; - for (key, val) in self.nodes.read().unwrap().iter() { - writeln!(f, " {}: {}", log_pubkey!(key), val)?; + for (&node_id, val) in self.nodes.read().unwrap().iter() { + writeln!(f, " {}: {}", log_bytes!(node_id.as_slice()), val)?; } Ok(()) } @@ -759,6 +1033,15 @@ impl NetworkGraph { } } + /// Clears the `NodeAnnouncementInfo` field for all nodes in the `NetworkGraph` for testing + /// purposes. + #[cfg(test)] + pub fn clear_nodes_announcement_info(&self) { + for node in self.nodes.write().unwrap().iter_mut() { + node.1.announcement_info = None; + } + } + /// For an already known node (from channel announcements), update its stored properties from a /// given node announcement. /// @@ -767,7 +1050,7 @@ impl NetworkGraph { /// routing messages from a source using a protocol other than the lightning P2P protocol. pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id, "node_announcement"); self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) } @@ -780,12 +1063,17 @@ impl NetworkGraph { } fn update_node_from_announcement_intern(&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> { - match self.nodes.write().unwrap().get_mut(&msg.node_id) { + match self.nodes.write().unwrap().get_mut(&NodeId::from_pubkey(&msg.node_id)) { None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}), Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { - if node_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); + // The timestamp field is somewhat of a misnomer - the BOLTs use it to order + // updates to ensure you always have the latest one, only vaguely suggesting + // that it be at least the current time. 
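				// Illustrative sketch (not part of this diff; the helper is hypothetical): when
				// originating announcements of our own, any strictly increasing timestamp
				// satisfies the ordering checks below, and the current unix time is the
				// conventional choice.
				#[cfg(feature = "std")]
				fn next_announcement_timestamp(last_sent_timestamp: u32) -> u32 {
					let now = std::time::SystemTime::now()
						.duration_since(std::time::UNIX_EPOCH)
						.expect("Time must be > 1970")
						.as_secs() as u32;
					core::cmp::max(now, last_sent_timestamp.saturating_add(1))
				}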
+ if node_info.last_update > msg.timestamp { + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)}); + } else if node_info.last_update == msg.timestamp { + return Err(LightningError{err: "Update had the same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } } @@ -822,10 +1110,10 @@ impl NetworkGraph { C::Target: chain::Access, { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1, "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2, "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1, "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2, "channel_announcement"); self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), chain_access) } @@ -884,15 +1172,23 @@ impl NetworkGraph { }, }; + #[allow(unused_mut, unused_assignments)] + let mut announcement_received_time = 0; + #[cfg(feature = "std")] + { + announcement_received_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + } + let chan_info = ChannelInfo { features: msg.features.clone(), - node_one: msg.node_id_1.clone(), + node_one: NodeId::from_pubkey(&msg.node_id_1), one_to_two: None, - node_two: msg.node_id_2.clone(), + node_two: NodeId::from_pubkey(&msg.node_id_2), two_to_one: None, capacity_sats: utxo_value, announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { full_msg.cloned() } else { None }, + announcement_received_time, }; let mut channels = self.channels.write().unwrap(); @@ -914,7 +1210,7 @@ impl NetworkGraph { Self::remove_channel_in_nodes(&mut nodes, &entry.get(), msg.short_channel_id); *entry.get_mut() = chan_info; } else { - return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}) + return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } }, BtreeEntry::Vacant(entry) => { @@ -939,8 +1235,8 @@ impl NetworkGraph { }; } - add_channel_to_node!(msg.node_id_1); - add_channel_to_node!(msg.node_id_2); + add_channel_to_node!(NodeId::from_pubkey(&msg.node_id_1)); + add_channel_to_node!(NodeId::from_pubkey(&msg.node_id_2)); Ok(()) } @@ -977,12 +1273,78 @@ impl NetworkGraph { } } + #[cfg(feature = "std")] + /// Removes information about channels that we haven't heard any updates about in some time. + /// This can be used regularly to prune the network graph of channels that likely no longer + /// exist. + /// + /// While there is no formal requirement that nodes regularly re-broadcast their channel + /// updates every two weeks, the non-normative section of BOLT 7 currently suggests that + /// pruning occur for updates which are at least two weeks old, which we implement here. 
+ /// + /// Note that for users of the `lightning-background-processor` crate this method may be + /// automatically called regularly for you. + /// + /// This method is only available with the `std` feature. See + /// [`NetworkGraph::remove_stale_channels_with_time`] for `no-std` use. + pub fn remove_stale_channels(&self) { + let time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + self.remove_stale_channels_with_time(time); + } + + /// Removes information about channels that we haven't heard any updates about in some time. + /// This can be used regularly to prune the network graph of channels that likely no longer + /// exist. + /// + /// While there is no formal requirement that nodes regularly re-broadcast their channel + /// updates every two weeks, the non-normative section of BOLT 7 currently suggests that + /// pruning occur for updates which are at least two weeks old, which we implement here. + /// + /// This function takes the current unix time as an argument. For users with the `std` feature + /// enabled, [`NetworkGraph::remove_stale_channels`] may be preferable. + pub fn remove_stale_channels_with_time(&self, current_time_unix: u64) { + let mut channels = self.channels.write().unwrap(); + // Time out if we haven't received an update in at least 14 days. + if current_time_unix > u32::max_value() as u64 { return; } // Remove by 2106 + if current_time_unix < STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS { return; } + let min_time_unix: u32 = (current_time_unix - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32; + // Sadly BTreeMap::retain was only stabilized in 1.53 so we can't switch to it for some + // time. + let mut scids_to_remove = Vec::new(); + for (scid, info) in channels.iter_mut() { + if info.one_to_two.is_some() && info.one_to_two.as_ref().unwrap().last_update < min_time_unix { + info.one_to_two = None; + } + if info.two_to_one.is_some() && info.two_to_one.as_ref().unwrap().last_update < min_time_unix { + info.two_to_one = None; + } + if info.one_to_two.is_none() && info.two_to_one.is_none() { + // We check the announcement_received_time here to ensure we don't drop + // announcements that we just received and are just waiting for our peer to send a + // channel_update for. + if info.announcement_received_time < min_time_unix as u64 { + scids_to_remove.push(*scid); + } + } + } + if !scids_to_remove.is_empty() { + let mut nodes = self.nodes.write().unwrap(); + for scid in scids_to_remove { + let info = channels.remove(&scid).expect("We just accessed this scid, it should be present"); + Self::remove_channel_in_nodes(&mut nodes, &info, scid); + } + } + } + /// For an already known (from announcement) channel, update info about one of the directions /// of the channel. /// /// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. + /// + /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or + /// materially in the future will be rejected. 
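	// Illustrative usage sketch (not part of this diff; the wrapper name is an assumption):
	// how a caller might drive the pruning methods above from a periodic timer.
	#[cfg(feature = "std")]
	fn prune_stale_gossip(graph: &NetworkGraph) {
		// Reads the system clock internally.
		graph.remove_stale_channels();
	}
	#[cfg(not(feature = "std"))]
	fn prune_stale_gossip(graph: &NetworkGraph, current_time_unix: u64) {
		// Without `std` the caller supplies the current unix time from its own source.
		graph.remove_stale_channels_with_time(current_time_unix);
	}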
pub fn update_channel(&self, msg: &msgs::ChannelUpdate, secp_ctx: &Secp256k1) -> Result<(), LightningError> { self.update_channel_intern(&msg.contents, Some(&msg), Some((&msg.signature, secp_ctx))) } @@ -990,6 +1352,9 @@ impl NetworkGraph { /// For an already known (from announcement) channel, update info about one of the directions /// of the channel without verifying the associated signatures. Because we aren't given the /// associated signatures here we cannot relay the channel update to any of our peers. + /// + /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or + /// materially in the future will be rejected. pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { self.update_channel_intern(msg, None, None::<(&secp256k1::Signature, &Secp256k1)>) } @@ -999,6 +1364,19 @@ impl NetworkGraph { let chan_enabled = msg.flags & (1 << 1) != (1 << 1); let chan_was_enabled; + #[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))] + { + // Note that many tests rely on being able to set arbitrarily old timestamps, thus we + // disable this check during tests! + let time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + if (msg.timestamp as u64) < time - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS { + return Err(LightningError{err: "channel_update is older than two weeks old".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)}); + } + if msg.timestamp as u64 > time + 60 * 60 * 24 { + return Err(LightningError{err: "channel_update has a timestamp more than a day in the future".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)}); + } + } + let mut channels = self.channels.write().unwrap(); match channels.get_mut(&msg.short_channel_id) { None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}), @@ -1016,21 +1394,33 @@ impl NetworkGraph { } } } - macro_rules! maybe_update_channel_info { - ( $target: expr, $src_node: expr) => { + macro_rules! check_update_latest { + ($target: expr) => { if let Some(existing_chan_info) = $target.as_ref() { - if existing_chan_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); + // The timestamp field is somewhat of a misnomer - the BOLTs use it to + // order updates to ensure you always have the latest one, only + // suggesting that it be at least the current time. For + // channel_updates specifically, the BOLTs discuss the possibility of + // pruning based on the timestamp field being more than two weeks old, + // but only in the non-normative section. + if existing_chan_info.last_update > msg.timestamp { + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)}); + } else if existing_chan_info.last_update == msg.timestamp { + return Err(LightningError{err: "Update had same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } chan_was_enabled = existing_chan_info.enabled; } else { chan_was_enabled = false; } + } + } + macro_rules! 
get_new_channel_info { + () => { { let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { full_msg.cloned() } else { None }; - let updated_channel_dir_info = DirectionalChannelInfo { + let updated_channel_update_info = ChannelUpdateInfo { enabled: chan_enabled, last_update: msg.timestamp, cltv_expiry_delta: msg.cltv_expiry_delta, @@ -1042,23 +1432,31 @@ impl NetworkGraph { }, last_update_message }; - $target = Some(updated_channel_dir_info); - } + Some(updated_channel_update_info) + } } } let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); if msg.flags & 1 == 1 { dest_node_id = channel.node_one.clone(); + check_update_latest!(channel.two_to_one); if let Some((sig, ctx)) = sig_info { - secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_two); + secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_two.as_slice()).map_err(|_| LightningError{ + err: "Couldn't parse source node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug) + })?, "channel_update"); } - maybe_update_channel_info!(channel.two_to_one, channel.node_two); + channel.two_to_one = get_new_channel_info!(); } else { dest_node_id = channel.node_two.clone(); + check_update_latest!(channel.one_to_two); if let Some((sig, ctx)) = sig_info { - secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_one); + secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_one.as_slice()).map_err(|_| LightningError{ + err: "Couldn't parse destination node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug) + })?, "channel_update"); } - maybe_update_channel_info!(channel.one_to_two, channel.node_one); + channel.one_to_two = get_new_channel_info!(); } } } @@ -1104,7 +1502,7 @@ impl NetworkGraph { Ok(()) } - fn remove_channel_in_nodes(nodes: &mut BTreeMap, chan: &ChannelInfo, short_channel_id: u64) { + fn remove_channel_in_nodes(nodes: &mut BTreeMap, chan: &ChannelInfo, short_channel_id: u64) { macro_rules! remove_from_node { ($node_id: expr) => { if let BtreeEntry::Occupied(mut entry) = nodes.entry($node_id) { @@ -1136,19 +1534,17 @@ impl ReadOnlyNetworkGraph<'_> { /// Returns all known nodes' public keys along with announced node info. /// /// (C-not exported) because we have no mapping for `BTreeMap`s - pub fn nodes(&self) -> &BTreeMap { + pub fn nodes(&self) -> &BTreeMap { &*self.nodes } /// Get network addresses by node id. /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. - /// - /// (C-not exported) as there is no practical way to track lifetimes of returned values. 
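	// Illustrative sketch (not part of this diff; the helper name is an assumption):
	// recovering a `PublicKey` from a stored `NodeId`, as the signature checks above do.
	fn node_id_to_pubkey(node_id: &NodeId) -> Result<PublicKey, secp256k1::Error> {
		// NodeId holds the raw 33-byte compressed key, so parsing can fail in principle
		// and the error must be handled rather than unwrapped.
		PublicKey::from_slice(node_id.as_slice())
	}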
- pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<&Vec> { - if let Some(node) = self.nodes.get(pubkey) { + pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { + if let Some(node) = self.nodes.get(&NodeId::from_pubkey(&pubkey)) { if let Some(node_info) = node.announcement_info.as_ref() { - return Some(&node_info.addresses) + return Some(node_info.addresses.clone()) } } None @@ -1163,18 +1559,20 @@ mod tests { use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NetworkUpdate, MAX_EXCESS_BYTES_FOR_RELAY}; use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, - ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; + ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use util::test_utils; use util::logger::Logger; use util::ser::{Readable, Writeable}; use util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider}; use util::scid_utils::scid_from_parts; + use super::STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS; + use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::network::constants::Network; use bitcoin::blockdata::constants::genesis_block; - use bitcoin::blockdata::script::Builder; + use bitcoin::blockdata::script::{Builder, Script}; use bitcoin::blockdata::transaction::TxOut; use bitcoin::blockdata::opcodes; @@ -1187,18 +1585,24 @@ mod tests { use prelude::*; use sync::Arc; - fn create_net_graph_msg_handler() -> (Secp256k1, NetGraphMsgHandler, Arc>) { + fn create_network_graph() -> NetworkGraph { + let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); + NetworkGraph::new(genesis_hash) + } + + fn create_net_graph_msg_handler(network_graph: &NetworkGraph) -> ( + Secp256k1, NetGraphMsgHandler<&NetworkGraph, Arc, Arc> + ) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let network_graph = NetworkGraph::new(genesis_hash); let net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, None, Arc::clone(&logger)); (secp_ctx, net_graph_msg_handler) } #[test] fn request_full_sync_finite_times() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); @@ -1209,35 +1613,95 @@ mod tests { assert!(!net_graph_msg_handler.should_request_full_sync(&node_id)); } - #[test] - fn handling_node_announcements() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); - - let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); - let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); - let zero_hash = Sha256dHash::hash(&[0; 32]); - let first_announcement_time = 500; - + fn 
get_signed_node_announcement(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> NodeAnnouncement { + let node_id = PublicKey::from_secret_key(&secp_ctx, node_key); let mut unsigned_announcement = UnsignedNodeAnnouncement { features: NodeFeatures::known(), - timestamp: first_announcement_time, - node_id: node_id_1, + timestamp: 100, + node_id: node_id, rgb: [0; 3], alias: [0; 32], addresses: Vec::new(), excess_address_data: Vec::new(), excess_data: Vec::new(), }; - let mut msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_announcement.clone() + f(&mut unsigned_announcement); + let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); + NodeAnnouncement { + signature: secp_ctx.sign(&msghash, node_key), + contents: unsigned_announcement + } + } + + fn get_signed_channel_announcement(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelAnnouncement { + let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key); + let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key); + let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); + let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); + + let mut unsigned_announcement = UnsignedChannelAnnouncement { + features: ChannelFeatures::known(), + chain_hash: genesis_block(Network::Testnet).header.block_hash(), + short_channel_id: 0, + node_id_1, + node_id_2, + bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), + bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), + excess_data: Vec::new(), + }; + f(&mut unsigned_announcement); + let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); + ChannelAnnouncement { + node_signature_1: secp_ctx.sign(&msghash, node_1_key), + node_signature_2: secp_ctx.sign(&msghash, node_2_key), + bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), + bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), + contents: unsigned_announcement, + } + } + + fn get_channel_script(secp_ctx: &Secp256k1) -> Script { + let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); + let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); + Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) + .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize()) + .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize()) + .push_opcode(opcodes::all::OP_PUSHNUM_2) + .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script() + .to_v0_p2wsh() + } + + fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { + let mut unsigned_channel_update = UnsignedChannelUpdate { + chain_hash: genesis_block(Network::Testnet).header.block_hash(), + short_channel_id: 0, + timestamp: 100, + flags: 0, + cltv_expiry_delta: 144, + htlc_minimum_msat: 1_000_000, + htlc_maximum_msat: OptionalField::Absent, + fee_base_msat: 10_000, + fee_proportional_millionths: 20, + excess_data: Vec::new() }; + f(&mut unsigned_channel_update); + let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); + ChannelUpdate { + signature: secp_ctx.sign(&msghash, node_key), + contents: unsigned_channel_update + } + } + + #[test] + fn handling_node_announcements() { + let network_graph = create_network_graph(); + let (secp_ctx, 
net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); + + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); + let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); + let zero_hash = Sha256dHash::hash(&[0; 32]); + let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!("No existing channels for node_announcement", e.err) @@ -1245,25 +1709,7 @@ mod tests { { // Announce a channel to add a corresponding node. - let unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::known(), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), - short_channel_id: 0, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() @@ -1279,34 +1725,27 @@ mod tests { match net_graph_msg_handler.handle_node_announcement( &NodeAnnouncement { signature: secp_ctx.sign(&fake_msghash, node_1_privkey), - contents: unsigned_announcement.clone() + contents: valid_announcement.contents.clone() }) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature from remote node") + Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message") }; - unsigned_announcement.timestamp += 1000; - unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let announcement_with_data = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_announcement.clone() - }; + let announcement_with_data = get_signed_node_announcement(|unsigned_announcement| { + unsigned_announcement.timestamp += 1000; + unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); + }, node_1_privkey, &secp_ctx); // Return false because contains excess data. match net_graph_msg_handler.handle_node_announcement(&announcement_with_data) { Ok(res) => assert!(!res), Err(_) => panic!() }; - unsigned_announcement.excess_data = Vec::new(); // Even though previous announcement was not relayed further, we still accepted it, // so we now won't accept announcements before the previous one. 
- unsigned_announcement.timestamp -= 10; - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let outdated_announcement = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_announcement.clone() - }; + let outdated_announcement = get_signed_node_announcement(|unsigned_announcement| { + unsigned_announcement.timestamp += 1000 - 10; + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_node_announcement(&outdated_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Update older than last processed update") @@ -1320,49 +1759,20 @@ mod tests { let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); - - let good_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) - .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize()) - .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize()) - .push_opcode(opcodes::all::OP_PUSHNUM_2) - .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); - - let mut unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::known(), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), - short_channel_id: 0, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - - let mut msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let good_script = get_channel_script(&secp_ctx); + let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); // Test if the UTXO lookups were not supported let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash()); - let mut net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, None, Arc::clone(&logger)); + let mut net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, None, Arc::clone(&logger)); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() }; { - let network = &net_graph_msg_handler.network_graph; - match network.read_only().channels().get(&unsigned_announcement.short_channel_id) { + match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) { None => panic!(), Some(_) => () }; @@ -1379,43 +1789,28 @@ mod tests { let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx); let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash()); - net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, Some(chain_source.clone()), 
Arc::clone(&logger)); - unsigned_announcement.short_channel_id += 1; - - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), Arc::clone(&logger)); + let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.short_channel_id += 1; + }, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry") }; // Now test if the transaction is found in the UTXO set and the script is correct. - unsigned_announcement.short_channel_id += 1; *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script.clone() }); - - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.short_channel_id += 2; + }, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() }; { - let network = &net_graph_msg_handler.network_graph; - match network.read_only().channels().get(&unsigned_announcement.short_channel_id) { + match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) { None => panic!(), Some(_) => () }; @@ -1431,22 +1826,16 @@ mod tests { // But if it is confirmed, replace the channel *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script }); - unsigned_announcement.features = ChannelFeatures::empty(); - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.features = ChannelFeatures::empty(); + unsigned_announcement.short_channel_id += 2; + }, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() }; { - let network = &net_graph_msg_handler.network_graph; - match network.read_only().channels().get(&unsigned_announcement.short_channel_id) { + match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) { Some(channel_entry) => { 
assert_eq!(channel_entry.features, ChannelFeatures::empty()); }, @@ -1455,43 +1844,23 @@ mod tests { } // Don't relay valid channels with excess data - unsigned_announcement.short_channel_id += 1; - unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { + unsigned_announcement.short_channel_id += 3; + unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); + }, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(!res), _ => panic!() }; - unsigned_announcement.excess_data = Vec::new(); - let invalid_sig_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_1_btckey), - contents: unsigned_announcement.clone(), - }; + let mut invalid_sig_announcement = valid_announcement.clone(); + invalid_sig_announcement.contents.excess_data = Vec::new(); match net_graph_msg_handler.handle_channel_announcement(&invalid_sig_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature from remote node") + Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message") }; - unsigned_announcement.node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let channel_to_itself_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_2_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let channel_to_itself_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&channel_to_itself_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself") @@ -1504,47 +1873,21 @@ mod tests { let logger: Arc = Arc::new(test_utils::TestLogger::new()); let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash()); - let net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, Some(chain_source.clone()), Arc::clone(&logger)); + let net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), Arc::clone(&logger)); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let node_1_btckey = 
&SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); - let zero_hash = Sha256dHash::hash(&[0; 32]); - let short_channel_id = 0; - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); let amount_sats = 1000_000; + let short_channel_id; { // Announce a channel we will update - let good_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) - .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize()) - .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize()) - .push_opcode(opcodes::all::OP_PUSHNUM_2) - .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); + let good_script = get_channel_script(&secp_ctx); *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() }); - let unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::empty(), - chain_hash, - short_channel_id, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_channel_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + short_channel_id = valid_channel_announcement.contents.short_channel_id; match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { Ok(_) => (), Err(_) => panic!() @@ -1552,32 +1895,14 @@ mod tests { } - let mut unsigned_channel_update = UnsignedChannelUpdate { - chain_hash, - short_channel_id, - timestamp: 100, - flags: 0, - cltv_expiry_delta: 144, - htlc_minimum_msat: 1000000, - htlc_maximum_msat: OptionalField::Absent, - fee_base_msat: 10000, - fee_proportional_millionths: 20, - excess_data: Vec::new() - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; - + let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(res), _ => panic!() }; { - let network = &net_graph_msg_handler.network_graph; - match network.read_only().channels().get(&short_channel_id) { + match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144); @@ -1586,85 +1911,63 @@ mod tests { }; } - unsigned_channel_update.timestamp += 100; - unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; + let valid_channel_update = 
get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.timestamp += 100; + unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); + }, node_1_privkey, &secp_ctx); // Return false because contains excess data match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(!res), _ => panic!() }; - unsigned_channel_update.timestamp += 10; - - unsigned_channel_update.short_channel_id += 1; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; + let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.timestamp += 110; + unsigned_channel_update.short_channel_id += 1; + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Couldn't find channel for update") }; - unsigned_channel_update.short_channel_id = short_channel_id; - - unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(MAX_VALUE_MSAT + 1); - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; + let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(MAX_VALUE_MSAT + 1); + unsigned_channel_update.timestamp += 110; + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats") }; - unsigned_channel_update.htlc_maximum_msat = OptionalField::Absent; - - unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(amount_sats * 1000 + 1); - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; + let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(amount_sats * 1000 + 1); + unsigned_channel_update.timestamp += 110; + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than channel capacity or capacity is bogus") }; - unsigned_channel_update.htlc_maximum_msat = OptionalField::Absent; // Even though previous update was not relayed further, we still accepted it, // so we now won't accept update before the previous one. 
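The closures above are consumed by the `get_signed_channel_update` test helper this patch introduces earlier in the file; its definition falls outside this excerpt, but its shape follows from the inline code it replaces: build the same baseline `UnsignedChannelUpdate` the removed lines spelled out, let the caller tweak it, then sign the encoded contents with the supplied node key. A rough sketch, assuming the surrounding test module's imports (the real helper's body may differ in detail):

	// Sketch of the helper the refactored tests call. The baseline values are the
	// ones the removed inline code used (timestamp 100, cltv_expiry_delta 144, etc.);
	// the closure lets each test override only the fields it cares about.
	fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelUpdate {
		let mut unsigned_channel_update = UnsignedChannelUpdate {
			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
			short_channel_id: 0,
			timestamp: 100,
			flags: 0,
			cltv_expiry_delta: 144,
			htlc_minimum_msat: 1000000,
			htlc_maximum_msat: OptionalField::Absent,
			fee_base_msat: 10000,
			fee_proportional_millionths: 20,
			excess_data: Vec::new()
		};
		f(&mut unsigned_channel_update);
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_key),
			contents: unsigned_channel_update
		}
	}

Called as `get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx)` it reproduces the update the removed block built by hand; the closures in the hunks above and below only bump the timestamp or adjust individual fields.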
- unsigned_channel_update.timestamp -= 10; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; - + let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.timestamp += 100; + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Update older than last processed update") + Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update") }; - unsigned_channel_update.timestamp += 500; + let mut invalid_sig_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.timestamp += 500; + }, node_1_privkey, &secp_ctx); + let zero_hash = Sha256dHash::hash(&[0; 32]); let fake_msghash = hash_to_message!(&zero_hash); - let invalid_sig_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&fake_msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; - + invalid_sig_channel_update.signature = secp_ctx.sign(&fake_msghash, node_1_privkey); match net_graph_msg_handler.handle_channel_update(&invalid_sig_channel_update) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature from remote node") + Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message") }; - } #[test] @@ -1673,71 +1976,31 @@ mod tests { let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); let network_graph = NetworkGraph::new(genesis_hash); - let net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, Some(chain_source.clone()), &logger); + let net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), &logger); let secp_ctx = Secp256k1::new(); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); - - let short_channel_id = 0; - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); - let network_graph = &net_graph_msg_handler.network_graph; { // There is no nodes in the table at the beginning. 
assert_eq!(network_graph.read_only().nodes().len(), 0); } + let short_channel_id; { // Announce a channel we will update - let unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::empty(), - chain_hash, - short_channel_id, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_channel_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source, &secp_ctx).is_ok()); assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); - let unsigned_channel_update = UnsignedChannelUpdate { - chain_hash, - short_channel_id, - timestamp: 100, - flags: 0, - cltv_expiry_delta: 144, - htlc_minimum_msat: 1000000, - htlc_maximum_msat: OptionalField::Absent, - fee_base_msat: 10000, - fee_proportional_millionths: 20, - excess_data: Vec::new() - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; - + let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { + payment_id: None, payment_hash: PaymentHash([0; 32]), rejected_by_dest: false, all_paths_failed: true, @@ -1745,6 +2008,8 @@ mod tests { network_update: Some(NetworkUpdate::ChannelUpdateMessage { msg: valid_channel_update, }), + short_channel_id: None, + retry: None, error_code: None, error_data: None, }); @@ -1762,6 +2027,7 @@ mod tests { }; net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { + payment_id: None, payment_hash: PaymentHash([0; 32]), rejected_by_dest: false, all_paths_failed: true, @@ -1770,6 +2036,8 @@ mod tests { short_channel_id, is_permanent: false, }), + short_channel_id: None, + retry: None, error_code: None, error_data: None, }); @@ -1783,65 +2051,91 @@ mod tests { } // Permanent closing deletes a channel + net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { + payment_id: None, + payment_hash: PaymentHash([0; 32]), + rejected_by_dest: false, + all_paths_failed: true, + path: vec![], + network_update: Some(NetworkUpdate::ChannelClosed { + short_channel_id, + is_permanent: true, + }), + short_channel_id: None, + retry: None, + error_code: None, + error_data: None, + }); + + assert_eq!(network_graph.read_only().channels().len(), 0); + // Nodes are also deleted because there are no associated channels anymore + assert_eq!(network_graph.read_only().nodes().len(), 0); + // TODO: Test NetworkUpdate::NodeFailure, which is 
not implemented yet. + } + + #[test] + fn test_channel_timeouts() { + // Test the removal of channels with `remove_stale_channels`. + let logger = test_utils::TestLogger::new(); + let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); + let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); + let network_graph = NetworkGraph::new(genesis_hash); + let net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), &logger); + let secp_ctx = Secp256k1::new(); + + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); + let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); + + let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let short_channel_id = valid_channel_announcement.contents.short_channel_id; + let chain_source: Option<&test_utils::TestChainSource> = None; + assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source, &secp_ctx).is_ok()); + assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); + + let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); + assert!(net_graph_msg_handler.handle_channel_update(&valid_channel_update).is_ok()); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); + + network_graph.remove_stale_channels_with_time(100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS); + assert_eq!(network_graph.read_only().channels().len(), 1); + assert_eq!(network_graph.read_only().nodes().len(), 2); + + network_graph.remove_stale_channels_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS); + #[cfg(feature = "std")] { - net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { - payment_hash: PaymentHash([0; 32]), - rejected_by_dest: false, - all_paths_failed: true, - path: vec![], - network_update: Some(NetworkUpdate::ChannelClosed { - short_channel_id, - is_permanent: true, - }), - error_code: None, - error_data: None, - }); + // In std mode, a further check is performed before fully removing the channel - + // the channel_announcement must have been received at least two weeks ago. We + // fudge that here by indicating the time has jumped two weeks. Note that the + // directional channel information will have been removed already.. + assert_eq!(network_graph.read_only().channels().len(), 1); + assert_eq!(network_graph.read_only().nodes().len(), 2); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); - assert_eq!(network_graph.read_only().channels().len(), 0); - // Nodes are also deleted because there are no associated channels anymore - assert_eq!(network_graph.read_only().nodes().len(), 0); + use std::time::{SystemTime, UNIX_EPOCH}; + let announcement_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + network_graph.remove_stale_channels_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS); } - // TODO: Test NetworkUpdate::NodeFailure, which is not implemented yet. 
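The new `test_channel_timeouts` above exercises `remove_stale_channels_with_time` with fabricated timestamps derived from `STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS`. Outside the test suite a caller would normally hand it the current UNIX time; a minimal illustrative sketch (the wrapper name here is hypothetical, only `remove_stale_channels_with_time` comes from the patch):

	// Hypothetical wrapper: prune the graph against the wall clock. Channel
	// directional info whose last update is more than two weeks older than `now`
	// is dropped, per the STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS constant.
	#[cfg(feature = "std")]
	fn prune_network_graph(network_graph: &NetworkGraph) {
		use std::time::{SystemTime, UNIX_EPOCH};
		let now = SystemTime::now().duration_since(UNIX_EPOCH)
			.expect("Time must be > 1970").as_secs();
		network_graph.remove_stale_channels_with_time(now);
	}

As the comments in the test note, once a channel has no remaining fresh directional update it is removed outright, along with any node left without channels; on std builds the channel announcement itself must additionally be at least two weeks old before the entry disappears.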
+ + assert_eq!(network_graph.read_only().channels().len(), 0); + assert_eq!(network_graph.read_only().nodes().len(), 0); } #[test] fn getting_next_channel_announcements() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); - - let short_channel_id = 1; - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); // Channels were not announced yet. let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(0, 1); assert_eq!(channels_with_announcements.len(), 0); + let short_channel_id; { // Announce a channel we will update - let unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::empty(), - chain_hash, - short_channel_id, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_channel_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + short_channel_id = valid_channel_announcement.contents.short_channel_id; match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { Ok(_) => (), Err(_) => panic!() @@ -1862,23 +2156,9 @@ mod tests { { // Valid channel update - let unsigned_channel_update = UnsignedChannelUpdate { - chain_hash, - short_channel_id, - timestamp: 101, - flags: 0, - cltv_expiry_delta: 144, - htlc_minimum_msat: 1000000, - htlc_maximum_msat: OptionalField::Absent, - fee_base_msat: 10000, - fee_proportional_millionths: 20, - excess_data: Vec::new() - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; + let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.timestamp = 101; + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => (), Err(_) => panic!() @@ -1896,26 +2176,12 @@ mod tests { panic!(); } - { // Channel update with excess data. 
- let unsigned_channel_update = UnsignedChannelUpdate { - chain_hash, - short_channel_id, - timestamp: 102, - flags: 0, - cltv_expiry_delta: 144, - htlc_minimum_msat: 1000000, - htlc_maximum_msat: OptionalField::Absent, - fee_base_msat: 10000, - fee_proportional_millionths: 20, - excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec() - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); - let valid_channel_update = ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_channel_update.clone() - }; + let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { + unsigned_channel_update.timestamp = 102; + unsigned_channel_update.excess_data = [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(); + }, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => (), Err(_) => panic!() @@ -1940,16 +2206,11 @@ mod tests { #[test] fn getting_next_node_announcements() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); - - let short_channel_id = 1; - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); // No nodes yet. let next_announcements = net_graph_msg_handler.get_next_node_announcements(None, 10); @@ -1957,25 +2218,7 @@ mod tests { { // Announce a channel to add 2 nodes - let unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::empty(), - chain_hash, - short_channel_id, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_channel_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { Ok(_) => (), Err(_) => panic!() @@ -1988,33 +2231,13 @@ mod tests { assert_eq!(next_announcements.len(), 0); { - let mut unsigned_announcement = UnsignedNodeAnnouncement { - features: NodeFeatures::known(), - timestamp: 1000, - node_id: node_id_1, - rgb: [0; 3], - alias: [0; 32], - addresses: Vec::new(), - excess_address_data: Vec::new(), - excess_data: Vec::new(), - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_announcement.clone() - }; + let valid_announcement = 
get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(_) => (), Err(_) => panic!() }; - unsigned_announcement.node_id = node_id_2; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_2_privkey), - contents: unsigned_announcement.clone() - }; - + let valid_announcement = get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(_) => (), Err(_) => panic!() @@ -2030,21 +2253,10 @@ mod tests { { // Later announcement which should not be relayed (excess data) prevent us from sharing a node - let unsigned_announcement = UnsignedNodeAnnouncement { - features: NodeFeatures::known(), - timestamp: 1010, - node_id: node_id_2, - rgb: [0; 3], - alias: [0; 32], - addresses: Vec::new(), - excess_address_data: Vec::new(), - excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(), - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_2_privkey), - contents: unsigned_announcement.clone() - }; + let valid_announcement = get_signed_node_announcement(|unsigned_announcement| { + unsigned_announcement.timestamp += 10; + unsigned_announcement.excess_data = [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(); + }, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(res) => assert!(!res), Err(_) => panic!() @@ -2057,217 +2269,81 @@ mod tests { #[test] fn network_graph_serialization() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); - let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); // Announce a channel to add a corresponding node. 
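The announcements in these tests likewise come from helpers added earlier in the patch: `get_signed_channel_announcement` for the message and `get_channel_script` for the 2-of-2 funding script, neither of which appears in this excerpt. Judging from the inline blocks they replace, the announcement helper looks roughly like the sketch below (the baseline values and the [40; 32]/[39; 32] bitcoin keys are taken from the removed code; the real body may differ):

	// Sketch of the announcement helper: derive both node ids and the two bitcoin
	// keys the old tests hard-coded, apply the caller's tweaks, then attach all four
	// signatures over the hash of the encoded contents.
	fn get_signed_channel_announcement<F: Fn(&mut UnsignedChannelAnnouncement)>(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelAnnouncement {
		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
		let mut unsigned_announcement = UnsignedChannelAnnouncement {
			features: ChannelFeatures::known(),
			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
			short_channel_id: 0,
			node_id_1: PublicKey::from_secret_key(secp_ctx, node_1_key),
			node_id_2: PublicKey::from_secret_key(secp_ctx, node_2_key),
			bitcoin_key_1: PublicKey::from_secret_key(secp_ctx, node_1_btckey),
			bitcoin_key_2: PublicKey::from_secret_key(secp_ctx, node_2_btckey),
			excess_data: Vec::new(),
		};
		f(&mut unsigned_announcement);
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		ChannelAnnouncement {
			node_signature_1: secp_ctx.sign(&msghash, node_1_key),
			node_signature_2: secp_ctx.sign(&msghash, node_2_key),
			bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
			bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
			contents: unsigned_announcement,
		}
	}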
- let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); - let unsigned_announcement = UnsignedChannelAnnouncement { - features: ChannelFeatures::known(), - chain_hash: genesis_block(Network::Testnet).header.block_hash(), - short_channel_id: 0, - node_id_1, - node_id_2, - bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), - bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), - excess_data: Vec::new(), - }; - - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), - contents: unsigned_announcement.clone(), - }; + let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() }; - - let node_id = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); - let unsigned_announcement = UnsignedNodeAnnouncement { - features: NodeFeatures::known(), - timestamp: 100, - node_id, - rgb: [0; 3], - alias: [0; 32], - addresses: Vec::new(), - excess_address_data: Vec::new(), - excess_data: Vec::new(), - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); - let valid_announcement = NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_1_privkey), - contents: unsigned_announcement.clone() - }; - + let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(_) => (), Err(_) => panic!() }; - let network = &net_graph_msg_handler.network_graph; let mut w = test_utils::TestVecWriter(Vec::new()); - assert!(!network.read_only().nodes().is_empty()); - assert!(!network.read_only().channels().is_empty()); - network.write(&mut w).unwrap(); - assert!(::read(&mut io::Cursor::new(&w.0)).unwrap() == *network); + assert!(!network_graph.read_only().nodes().is_empty()); + assert!(!network_graph.read_only().channels().is_empty()); + network_graph.write(&mut w).unwrap(); + assert!(::read(&mut io::Cursor::new(&w.0)).unwrap() == network_graph); } #[test] + #[cfg(feature = "std")] fn calling_sync_routing_table() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + use std::time::{SystemTime, UNIX_EPOCH}; + + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); let chain_hash = genesis_block(Network::Testnet).header.block_hash(); - let first_blocknum = 0; - let number_of_blocks = 0xffff_ffff; // It should ignore if gossip_queries feature is not enabled { - let init_msg = Init { features: InitFeatures::known().clear_gossip_queries() }; - net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg); + let init_msg = Init { features: InitFeatures::known().clear_gossip_queries(), remote_network_address: None }; + net_graph_msg_handler.peer_connected(&node_id_1, &init_msg); let events = 
net_graph_msg_handler.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); } - // It should send a query_channel_message with the correct information + // It should send a gossip_timestamp_filter with the correct information { - let init_msg = Init { features: InitFeatures::known() }; - net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg); + let init_msg = Init { features: InitFeatures::known(), remote_network_address: None }; + net_graph_msg_handler.peer_connected(&node_id_1, &init_msg); let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { - MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => { + MessageSendEvent::SendGossipTimestampFilter{ node_id, msg } => { assert_eq!(node_id, &node_id_1); assert_eq!(msg.chain_hash, chain_hash); - assert_eq!(msg.first_blocknum, first_blocknum); - assert_eq!(msg.number_of_blocks, number_of_blocks); + let expected_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + assert!((msg.first_timestamp as u64) >= expected_timestamp - 60*60*24*7*2); + assert!((msg.first_timestamp as u64) < expected_timestamp - 60*60*24*7*2 + 10); + assert_eq!(msg.timestamp_range, u32::max_value()); }, _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery") }; } - - // It should not enqueue a query when should_request_full_sync return false. - // The initial implementation allows syncing with the first 5 peers after - // which should_request_full_sync will return false - { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); - let init_msg = Init { features: InitFeatures::known() }; - for n in 1..7 { - let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap(); - let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - net_graph_msg_handler.sync_routing_table(&node_id, &init_msg); - let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); - if n <= 5 { - assert_eq!(events.len(), 1); - } else { - assert_eq!(events.len(), 0); - } - - } - } - } - - #[test] - fn handling_reply_channel_range() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); - let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); - - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); - - // Test receipt of a single reply that should enqueue an SCID query - // matching the SCIDs in the reply - { - let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange { - chain_hash, - sync_complete: true, - first_blocknum: 0, - number_of_blocks: 2000, - short_channel_ids: vec![ - 0x0003e0_000000_0000, // 992x0x0 - 0x0003e8_000000_0000, // 1000x0x0 - 0x0003e9_000000_0000, // 1001x0x0 - 0x0003f0_000000_0000, // 1008x0x0 - 0x00044c_000000_0000, // 1100x0x0 - 0x0006e0_000000_0000, // 1760x0x0 - ], - }); - assert!(result.is_ok()); - - // We expect to emit a query_short_channel_ids message with the received scids - let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match &events[0] { - MessageSendEvent::SendShortIdsQuery { node_id, msg } => { - assert_eq!(node_id, &node_id_1); - assert_eq!(msg.chain_hash, chain_hash); - assert_eq!(msg.short_channel_ids, vec![ - 0x0003e0_000000_0000, // 992x0x0 - 0x0003e8_000000_0000, // 1000x0x0 - 0x0003e9_000000_0000, // 1001x0x0 - 0x0003f0_000000_0000, // 1008x0x0 - 0x00044c_000000_0000, // 
1100x0x0
-						0x0006e0_000000_0000, // 1760x0x0
-					]);
				},
				_ => panic!("expected MessageSendEvent::SendShortIdsQuery"),
			}
		}
	}
-
-	#[test]
-	fn handling_reply_short_channel_ids() {
-		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
-		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
-		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
-
-		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
-
-		// Test receipt of a successful reply
-		{
-			let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
-				chain_hash,
-				full_information: true,
-			});
-			assert!(result.is_ok());
-		}
-
-		// Test receipt of a reply that indicates the peer does not maintain up-to-date information
-		// for the chain_hash requested in the query.
-		{
-			let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
-				chain_hash,
-				full_information: false,
-			});
-			assert!(result.is_err());
-			assert_eq!(result.err().unwrap().err, "Received reply_short_channel_ids_end with no information");
-		}
	}

	#[test]
	fn handling_query_channel_range() {
-		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+		let network_graph = create_network_graph();
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);

		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
-		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
-		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
-		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
-		let bitcoin_key_1 = PublicKey::from_secret_key(&secp_ctx, node_1_btckey);
-		let bitcoin_key_2 = PublicKey::from_secret_key(&secp_ctx, node_2_btckey);

		let mut scids: Vec<u64> = vec![
			scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
@@ -2283,25 +2359,9 @@ mod tests {
		scids.push(scid_from_parts(108001, 1, 0).unwrap());

		for scid in scids {
-			let unsigned_announcement = UnsignedChannelAnnouncement {
-				features: ChannelFeatures::known(),
-				chain_hash: chain_hash.clone(),
-				short_channel_id: scid,
-				node_id_1,
-				node_id_2,
-				bitcoin_key_1,
-				bitcoin_key_2,
-				excess_data: Vec::new(),
-			};
-
-			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
-			let valid_announcement = ChannelAnnouncement {
-				node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
-				node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
-				bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
-				bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
-				contents: unsigned_announcement.clone(),
-			};
+			let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
+				unsigned_announcement.short_channel_id = scid;
+			}, node_1_privkey, node_2_privkey, &secp_ctx);
			match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
				Ok(_) => (),
				_ => panic!()
@@ -2519,7 +2579,7 @@ mod tests {
	}

	fn do_handling_query_channel_range(
-		net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
+		net_graph_msg_handler: &NetGraphMsgHandler<&NetworkGraph, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
		test_node_id: &PublicKey,
		msg: QueryChannelRange,
		expected_ok: bool,
@@ -2568,7 +2628,8 @@ mod tests {

	#[test]
	fn handling_query_short_channel_ids() {
-		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+		let network_graph = create_network_graph();
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);

@@ -2582,7 +2643,7 @@ mod tests {
	}
}

-#[cfg(all(test, feature = "unstable"))]
+#[cfg(all(test, feature = "_bench_unstable"))]
mod benches {
	use super::*;
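A behavioral note on the `calling_sync_routing_table` test above: when a peer that advertises gossip queries connects, the handler now sends a `gossip_timestamp_filter` covering roughly the last two weeks of gossip instead of the full `query_channel_range` sweep the removed assertions expected. The filter the test checks for can be built by hand along these lines (illustrative helper, assuming the test module's imports):

	// Ask a peer for all gossip whose timestamp falls within roughly the last two
	// weeks, plus everything newer, mirroring the values the test asserts on.
	#[cfg(feature = "std")]
	fn two_week_gossip_filter(chain_hash: BlockHash) -> GossipTimestampFilter {
		use std::time::{SystemTime, UNIX_EPOCH};
		let now = SystemTime::now().duration_since(UNIX_EPOCH)
			.expect("Time must be > 1970").as_secs();
		GossipTimestampFilter {
			chain_hash,
			first_timestamp: (now - 60 * 60 * 24 * 7 * 2) as u32, // about two weeks ago
			timestamp_range: u32::max_value(),                    // and everything after
		}
	}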