X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=2e0679eba79f6cbbb1473ebdf89a0e113cf8fd5e;hb=65920818db58880f6576fd50c3ea5df273912978;hp=e5961423fc2f3b7fa409d6b1b3ffce6c85a65cec;hpb=d5a1435905a82045bd9a984421d180d0f12187a4;p=rust-lightning diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index e5961423..2e0679eb 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -10,7 +10,7 @@ //! The top-level network map tracking logic lives here. use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; -use bitcoin::secp256k1::key::PublicKey; +use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1; @@ -25,7 +25,7 @@ use chain; use chain::Access; use ln::features::{ChannelFeatures, NodeFeatures}; use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; -use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField}; +use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField, GossipTimestampFilter}; use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; @@ -251,6 +251,8 @@ where C::Target: chain::Access, L::Target: Logger /// Gets a reference to the underlying [`NetworkGraph`] which was provided in /// [`NetGraphMsgHandler::new`]. + /// + /// (C-not exported) as bindings don't support a reference-to-a-reference yet pub fn network_graph(&self) -> &G { &self.network_graph } @@ -292,10 +294,21 @@ where C::Target: chain::Access, L::Target: Logger } macro_rules! secp_verify_sig { - ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => { - match $secp_ctx.verify($msg, $sig, $pubkey) { + ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr, $msg_type: expr ) => { + match $secp_ctx.verify_ecdsa($msg, $sig, $pubkey) { Ok(_) => {}, - Err(_) => return Err(LightningError{err: "Invalid signature from remote node".to_owned(), action: ErrorAction::IgnoreError}), + Err(_) => { + return Err(LightningError { + err: format!("Invalid signature on {} message", $msg_type), + action: ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id: [0; 32], + data: format!("Invalid signature on {} message", $msg_type), + }, + log_level: Level::Trace, + }, + }); + }, } }; } @@ -382,74 +395,97 @@ where C::Target: chain::Access, L::Target: Logger /// to request gossip messages for each channel. The sync is considered complete /// when the final reply_scids_end message is received, though we are not /// tracking this directly. - fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) { - + fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) { // We will only perform a sync with peers that support gossip_queries. if !init_msg.features.supports_gossip_queries() { return (); } - // Check if we need to perform a full synchronization with this peer - if !self.should_request_full_sync(&their_node_id) { - return (); + // The lightning network's gossip sync system is completely broken in numerous ways. + // + // Given no broadly-available set-reconciliation protocol, the only reasonable approach is + // to do a full sync from the first few peers we connect to, and then receive gossip + // updates from all our peers normally. 
+		//
+		// Originally, we could simply tell a peer to dump us the entire gossip table on startup,
+		// wasting lots of bandwidth but ensuring we have the full network graph. After the initial
+		// dump peers would always send gossip and we'd stay up-to-date with whatever our peer has
+		// seen.
+		//
+		// In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you
+		// to ask for the SCIDs of all channels in your peer's routing graph, and then only request
+		// channel data which you are missing. Except there was no way at all to identify which
+		// `channel_update`s you were missing, so you still had to request everything, just in a
+		// very complicated way with some queries instead of just getting the dump.
+		//
+		// Later, an option was added to fetch the latest timestamps of the `channel_update`s to
+		// make efficient sync possible; however, it has yet to be implemented in lnd, which makes
+		// relying on it useless.
+		//
+		// After gossip queries were introduced, support for receiving a full gossip table dump on
+		// connection was removed from several nodes, making it impossible to get a full sync
+		// without using the "gossip queries" messages.
+		//
+		// Once you opt into "gossip queries", the only way to receive any gossip updates that a
+		// peer receives after you connect is to send a `gossip_timestamp_filter` message. This
+		// message, as the name implies, tells the peer to not forward any gossip messages with a
+		// timestamp older than a given value (not the time the peer received the filter, but the
+		// timestamp in the update message, which is often hours behind when the peer received the
+		// message).
+		//
+		// Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for
+		// your peer to send you the full routing graph (subject to the filter). Thus, in order to
+		// tell a peer to send you any updates as it sees them, you have to also ask for the full
+		// routing graph to be synced. If you set a timestamp filter near the current time, peers
+		// will simply not forward any new updates they see to you which were generated some time
+		// ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks
+		// ago), you will always get the full routing graph from all your peers.
+		//
+		// Most lightning nodes today opt to simply turn off receiving gossip data which only
+		// propagated some time after it was generated, and, worse, often disable gossiping with
+		// several peers after their first connection. The second behavior can cause gossip to not
+		// propagate fully if there are cuts in the gossiping subgraph.
+		//
+		// In an attempt to strike a middle ground between always fetching the full graph from all of
+		// our peers and never receiving gossip from peers at all, we send all of our peers a
+		// `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago.
+		//
+		// For no-std builds, we bury our head in the sand and do a full sync on each connection.
+ let should_request_full_sync = self.should_request_full_sync(&their_node_id); + #[allow(unused_mut, unused_assignments)] + let mut gossip_start_time = 0; + #[cfg(feature = "std")] + { + gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + if should_request_full_sync { + gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago + } else { + gossip_start_time -= 60 * 60; // an hour ago + } } - let first_blocknum = 0; - let number_of_blocks = 0xffffffff; - log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks); let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(MessageSendEvent::SendChannelRangeQuery { + pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), - msg: QueryChannelRange { + msg: GossipTimestampFilter { chain_hash: self.network_graph.genesis_hash, - first_blocknum, - number_of_blocks, + first_timestamp: gossip_start_time as u32, // 2106 issue! + timestamp_range: u32::max_value(), }, }); } - /// Statelessly processes a reply to a channel range query by immediately - /// sending an SCID query with SCIDs in the reply. To keep this handler - /// stateless, it does not validate the sequencing of replies for multi- - /// reply ranges. It does not validate whether the reply(ies) cover the - /// queried range. It also does not filter SCIDs to only those in the - /// original query range. We also do not validate that the chain_hash - /// matches the chain_hash of the NetworkGraph. Any chan_ann message that - /// does not match our chain_hash will be rejected when the announcement is - /// processed. - fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> { - log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),); - - log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len()); - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(MessageSendEvent::SendShortIdsQuery { - node_id: their_node_id.clone(), - msg: QueryShortChannelIds { - chain_hash: msg.chain_hash, - short_channel_ids: msg.short_channel_ids, - } - }); - + fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { + // We don't make queries, so should never receive replies. If, in the future, the set + // reconciliation extensions to gossip queries become broadly supported, we should revert + // this code to its state pre-0.0.106. Ok(()) } - /// When an SCID query is initiated the remote peer will begin streaming - /// gossip messages. In the event of a failure, we may have received - /// some channel information. Before trying with another peer, the - /// caller should update its set of SCIDs that need to be queried. 
-	fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
-		log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information);
-
-		// If the remote node does not have up-to-date information for the
-		// chain_hash they will set full_information=false. We can fail
-		// the result and try again with a different peer.
-		if !msg.full_information {
-			return Err(LightningError {
-				err: String::from("Received reply_short_channel_ids_end with no information"),
-				action: ErrorAction::IgnoreError
-			});
-		}
-
+	fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
+		// We don't make queries, so should never receive replies. If, in the future, the set
+		// reconciliation extensions to gossip queries become broadly supported, we should revert
+		// this code to its state pre-0.0.106.
 		Ok(())
 	}
 
@@ -580,9 +616,8 @@ where
 }
 
 #[derive(Clone, Debug, PartialEq)]
-/// Details about one direction of a channel. Received
-/// within a channel update.
-pub struct DirectionalChannelInfo {
+/// Details about one direction of a channel as received within a [`ChannelUpdate`].
+pub struct ChannelUpdateInfo {
 	/// When the last update to the channel direction was issued.
 	/// Value is opaque, as set in the announcement.
 	pub last_update: u32,
@@ -603,14 +638,14 @@ pub struct DirectionalChannelInfo {
 	pub last_update_message: Option<ChannelUpdate>,
 }
 
-impl fmt::Display for DirectionalChannelInfo {
+impl fmt::Display for ChannelUpdateInfo {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
 		write!(f, "last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}",
 			self.last_update, self.enabled, self.cltv_expiry_delta, self.htlc_minimum_msat, self.fees)?;
 		Ok(())
 	}
 }
 
-impl_writeable_tlv_based!(DirectionalChannelInfo, {
+impl_writeable_tlv_based!(ChannelUpdateInfo, {
 	(0, last_update, required),
 	(2, enabled, required),
 	(4, cltv_expiry_delta, required),
@@ -629,11 +664,11 @@ pub struct ChannelInfo {
 	/// Source node of the first direction of a channel
 	pub node_one: NodeId,
 	/// Details about the first direction of a channel
-	pub one_to_two: Option<DirectionalChannelInfo>,
+	pub one_to_two: Option<ChannelUpdateInfo>,
 	/// Source node of the second direction of a channel
 	pub node_two: NodeId,
 	/// Details about the second direction of a channel
-	pub two_to_one: Option<DirectionalChannelInfo>,
+	pub two_to_one: Option<ChannelUpdateInfo>,
 	/// The channel capacity as seen on-chain, if chain lookup is available.
 	pub capacity_sats: Option<u64>,
 	/// An initial announcement of the channel
@@ -647,6 +682,38 @@ pub struct ChannelInfo {
 	announcement_received_time: u64,
 }
 
+impl ChannelInfo {
+	/// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a
+	/// returned `source`, or `None` if `target` is not one of the channel's counterparties.
+	pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
+		let (direction, source) = {
+			if target == &self.node_one {
+				(self.two_to_one.as_ref(), &self.node_two)
+			} else if target == &self.node_two {
+				(self.one_to_two.as_ref(), &self.node_one)
+			} else {
+				return None;
+			}
+		};
+		Some((DirectedChannelInfo { channel: self, direction }, source))
+	}
+
+	/// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a
+	/// returned `target`, or `None` if `source` is not one of the channel's counterparties.
+ pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> { + let (direction, target) = { + if source == &self.node_one { + (self.one_to_two.as_ref(), &self.node_two) + } else if source == &self.node_two { + (self.two_to_one.as_ref(), &self.node_one) + } else { + return None; + } + }; + Some((DirectedChannelInfo { channel: self, direction }, target)) + } +} + impl fmt::Display for ChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}", @@ -666,6 +733,132 @@ impl_writeable_tlv_based!(ChannelInfo, { (12, announcement_message, required), }); +/// A wrapper around [`ChannelInfo`] representing information about the channel as directed from a +/// source node to a target node. +#[derive(Clone)] +pub struct DirectedChannelInfo<'a> { + channel: &'a ChannelInfo, + direction: Option<&'a ChannelUpdateInfo>, +} + +impl<'a> DirectedChannelInfo<'a> { + /// Returns information for the channel. + pub fn channel(&self) -> &'a ChannelInfo { self.channel } + + /// Returns information for the direction. + pub fn direction(&self) -> Option<&'a ChannelUpdateInfo> { self.direction } + + /// Returns the [`EffectiveCapacity`] of the channel in the direction. + /// + /// This is either the total capacity from the funding transaction, if known, or the + /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known, + /// whichever is smaller. + pub fn effective_capacity(&self) -> EffectiveCapacity { + let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000); + self.direction + .and_then(|direction| direction.htlc_maximum_msat) + .map(|max_htlc_msat| { + let capacity_msat = capacity_msat.unwrap_or(u64::max_value()); + if max_htlc_msat < capacity_msat { + EffectiveCapacity::MaximumHTLC { amount_msat: max_htlc_msat } + } else { + EffectiveCapacity::Total { capacity_msat } + } + }) + .or_else(|| capacity_msat.map(|capacity_msat| + EffectiveCapacity::Total { capacity_msat })) + .unwrap_or(EffectiveCapacity::Unknown) + } + + /// Returns `Some` if [`ChannelUpdateInfo`] is available in the direction. + pub(super) fn with_update(self) -> Option> { + match self.direction { + Some(_) => Some(DirectedChannelInfoWithUpdate { inner: self }), + None => None, + } + } +} + +impl<'a> fmt::Debug for DirectedChannelInfo<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("DirectedChannelInfo") + .field("channel", &self.channel) + .finish() + } +} + +/// A [`DirectedChannelInfo`] with [`ChannelUpdateInfo`] available in its direction. +#[derive(Clone)] +pub(super) struct DirectedChannelInfoWithUpdate<'a> { + inner: DirectedChannelInfo<'a>, +} + +impl<'a> DirectedChannelInfoWithUpdate<'a> { + /// Returns information for the channel. + #[inline] + pub(super) fn channel(&self) -> &'a ChannelInfo { &self.inner.channel } + + /// Returns information for the direction. + #[inline] + pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.inner.direction.unwrap() } + + /// Returns the [`EffectiveCapacity`] of the channel in the direction. + #[inline] + pub(super) fn effective_capacity(&self) -> EffectiveCapacity { self.inner.effective_capacity() } +} + +impl<'a> fmt::Debug for DirectedChannelInfoWithUpdate<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + self.inner.fmt(f) + } +} + +/// The effective capacity of a channel for routing purposes. 
+/// +/// While this may be smaller than the actual channel capacity, amounts greater than +/// [`Self::as_msat`] should not be routed through the channel. +pub enum EffectiveCapacity { + /// The available liquidity in the channel known from being a channel counterparty, and thus a + /// direct hop. + ExactLiquidity { + /// Either the inbound or outbound liquidity depending on the direction, denominated in + /// millisatoshi. + liquidity_msat: u64, + }, + /// The maximum HTLC amount in one direction as advertised on the gossip network. + MaximumHTLC { + /// The maximum HTLC amount denominated in millisatoshi. + amount_msat: u64, + }, + /// The total capacity of the channel as determined by the funding transaction. + Total { + /// The funding amount denominated in millisatoshi. + capacity_msat: u64, + }, + /// A capacity sufficient to route any payment, typically used for private channels provided by + /// an invoice. + Infinite, + /// A capacity that is unknown possibly because either the chain state is unavailable to know + /// the total capacity or the `htlc_maximum_msat` was not advertised on the gossip network. + Unknown, +} + +/// The presumed channel capacity denominated in millisatoshi for [`EffectiveCapacity::Unknown`] to +/// use when making routing decisions. +pub const UNKNOWN_CHANNEL_CAPACITY_MSAT: u64 = 250_000 * 1000; + +impl EffectiveCapacity { + /// Returns the effective capacity denominated in millisatoshi. + pub fn as_msat(&self) -> u64 { + match self { + EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat, + EffectiveCapacity::MaximumHTLC { amount_msat } => *amount_msat, + EffectiveCapacity::Total { capacity_msat } => *capacity_msat, + EffectiveCapacity::Infinite => u64::max_value(), + EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT, + } + } +} /// Fees for routing via a given channel or a node #[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] @@ -840,6 +1033,15 @@ impl NetworkGraph { } } + /// Clears the `NodeAnnouncementInfo` field for all nodes in the `NetworkGraph` for testing + /// purposes. + #[cfg(test)] + pub fn clear_nodes_announcement_info(&self) { + for node in self.nodes.write().unwrap().iter_mut() { + node.1.announcement_info = None; + } + } + /// For an already known node (from channel announcements), update its stored properties from a /// given node announcement. /// @@ -848,7 +1050,7 @@ impl NetworkGraph { /// routing messages from a source using a protocol other than the lightning P2P protocol. 
pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id, "node_announcement"); self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) } @@ -908,10 +1110,10 @@ impl NetworkGraph { C::Target: chain::Access, { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1); - secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1, "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2, "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1, "channel_announcement"); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2, "channel_announcement"); self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), chain_access) } @@ -1154,10 +1356,10 @@ impl NetworkGraph { /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or /// materially in the future will be rejected. pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { - self.update_channel_intern(msg, None, None::<(&secp256k1::Signature, &Secp256k1)>) + self.update_channel_intern(msg, None, None::<(&secp256k1::ecdsa::Signature, &Secp256k1)>) } - fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::Signature, &Secp256k1)>) -> Result<(), LightningError> { + fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::ecdsa::Signature, &Secp256k1)>) -> Result<(), LightningError> { let dest_node_id; let chan_enabled = msg.flags & (1 << 1) != (1 << 1); let chan_was_enabled; @@ -1192,8 +1394,8 @@ impl NetworkGraph { } } } - macro_rules! maybe_update_channel_info { - ( $target: expr, $src_node: expr) => { + macro_rules! check_update_latest { + ($target: expr) => { if let Some(existing_chan_info) = $target.as_ref() { // The timestamp field is somewhat of a misnomer - the BOLTs use it to // order updates to ensure you always have the latest one, only @@ -1210,11 +1412,15 @@ impl NetworkGraph { } else { chan_was_enabled = false; } + } + } + macro_rules! 
get_new_channel_info { + () => { { let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { full_msg.cloned() } else { None }; - let updated_channel_dir_info = DirectionalChannelInfo { + let updated_channel_update_info = ChannelUpdateInfo { enabled: chan_enabled, last_update: msg.timestamp, cltv_expiry_delta: msg.cltv_expiry_delta, @@ -1226,29 +1432,31 @@ impl NetworkGraph { }, last_update_message }; - $target = Some(updated_channel_dir_info); - } + Some(updated_channel_update_info) + } } } let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); if msg.flags & 1 == 1 { dest_node_id = channel.node_one.clone(); + check_update_latest!(channel.two_to_one); if let Some((sig, ctx)) = sig_info { secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_two.as_slice()).map_err(|_| LightningError{ err: "Couldn't parse source node pubkey".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug) - })?); + })?, "channel_update"); } - maybe_update_channel_info!(channel.two_to_one, channel.node_two); + channel.two_to_one = get_new_channel_info!(); } else { dest_node_id = channel.node_two.clone(); + check_update_latest!(channel.one_to_two); if let Some((sig, ctx)) = sig_info { secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_one.as_slice()).map_err(|_| LightningError{ err: "Couldn't parse destination node pubkey".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug) - })?); + })?, "channel_update"); } - maybe_update_channel_info!(channel.one_to_two, channel.node_one); + channel.one_to_two = get_new_channel_info!(); } } } @@ -1351,7 +1559,7 @@ mod tests { use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NetworkUpdate, MAX_EXCESS_BYTES_FOR_RELAY}; use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, - ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; + ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use util::test_utils; use util::logger::Logger; use util::ser::{Readable, Writeable}; @@ -1370,10 +1578,11 @@ mod tests { use hex; - use bitcoin::secp256k1::key::{PublicKey, SecretKey}; + use bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; use io; + use bitcoin::secp256k1; use prelude::*; use sync::Arc; @@ -1420,7 +1629,7 @@ mod tests { f(&mut unsigned_announcement); let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); NodeAnnouncement { - signature: secp_ctx.sign(&msghash, node_key), + signature: secp_ctx.sign_ecdsa(&msghash, node_key), contents: unsigned_announcement } } @@ -1444,10 +1653,10 @@ mod tests { f(&mut unsigned_announcement); let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_key), - node_signature_2: secp_ctx.sign(&msghash, node_2_key), - bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), - bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), + node_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_key), + node_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_key), + bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_btckey), + bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_btckey), contents: unsigned_announcement, } } @@ -1479,7 +1688,7 @@ mod 
tests { f(&mut unsigned_channel_update); let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); ChannelUpdate { - signature: secp_ctx.sign(&msghash, node_key), + signature: secp_ctx.sign_ecdsa(&msghash, node_key), contents: unsigned_channel_update } } @@ -1516,11 +1725,11 @@ mod tests { let fake_msghash = hash_to_message!(&zero_hash); match net_graph_msg_handler.handle_node_announcement( &NodeAnnouncement { - signature: secp_ctx.sign(&fake_msghash, node_1_privkey), + signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey), contents: valid_announcement.contents.clone() }) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature from remote node") + Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message") }; let announcement_with_data = get_signed_node_announcement(|unsigned_announcement| { @@ -1649,7 +1858,7 @@ mod tests { invalid_sig_announcement.contents.excess_data = Vec::new(); match net_graph_msg_handler.handle_channel_announcement(&invalid_sig_announcement) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature from remote node") + Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message") }; let channel_to_itself_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx); @@ -1755,10 +1964,10 @@ mod tests { }, node_1_privkey, &secp_ctx); let zero_hash = Sha256dHash::hash(&[0; 32]); let fake_msghash = hash_to_message!(&zero_hash); - invalid_sig_channel_update.signature = secp_ctx.sign(&fake_msghash, node_1_privkey); + invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey); match net_graph_msg_handler.handle_channel_update(&invalid_sig_channel_update) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Invalid signature from remote node") + Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message") }; } @@ -2088,140 +2297,43 @@ mod tests { } #[test] + #[cfg(feature = "std")] fn calling_sync_routing_table() { + use std::time::{SystemTime, UNIX_EPOCH}; + let network_graph = create_network_graph(); let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); let chain_hash = genesis_block(Network::Testnet).header.block_hash(); - let first_blocknum = 0; - let number_of_blocks = 0xffff_ffff; // It should ignore if gossip_queries feature is not enabled { - let init_msg = Init { features: InitFeatures::known().clear_gossip_queries() }; - net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg); + let init_msg = Init { features: InitFeatures::known().clear_gossip_queries(), remote_network_address: None }; + net_graph_msg_handler.peer_connected(&node_id_1, &init_msg); let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); } - // It should send a query_channel_message with the correct information + // It should send a gossip_timestamp_filter with the correct information { - let init_msg = Init { features: InitFeatures::known() }; - net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg); + let init_msg = Init { features: InitFeatures::known(), remote_network_address: None }; + net_graph_msg_handler.peer_connected(&node_id_1, &init_msg); let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { - 
MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => { + MessageSendEvent::SendGossipTimestampFilter{ node_id, msg } => { assert_eq!(node_id, &node_id_1); assert_eq!(msg.chain_hash, chain_hash); - assert_eq!(msg.first_blocknum, first_blocknum); - assert_eq!(msg.number_of_blocks, number_of_blocks); + let expected_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(); + assert!((msg.first_timestamp as u64) >= expected_timestamp - 60*60*24*7*2); + assert!((msg.first_timestamp as u64) < expected_timestamp - 60*60*24*7*2 + 10); + assert_eq!(msg.timestamp_range, u32::max_value()); }, _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery") }; } - - // It should not enqueue a query when should_request_full_sync return false. - // The initial implementation allows syncing with the first 5 peers after - // which should_request_full_sync will return false - { - let network_graph = create_network_graph(); - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); - let init_msg = Init { features: InitFeatures::known() }; - for n in 1..7 { - let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap(); - let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - net_graph_msg_handler.sync_routing_table(&node_id, &init_msg); - let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); - if n <= 5 { - assert_eq!(events.len(), 1); - } else { - assert_eq!(events.len(), 0); - } - - } - } - } - - #[test] - fn handling_reply_channel_range() { - let network_graph = create_network_graph(); - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); - let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); - let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); - - let chain_hash = genesis_block(Network::Testnet).header.block_hash(); - - // Test receipt of a single reply that should enqueue an SCID query - // matching the SCIDs in the reply - { - let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange { - chain_hash, - sync_complete: true, - first_blocknum: 0, - number_of_blocks: 2000, - short_channel_ids: vec![ - 0x0003e0_000000_0000, // 992x0x0 - 0x0003e8_000000_0000, // 1000x0x0 - 0x0003e9_000000_0000, // 1001x0x0 - 0x0003f0_000000_0000, // 1008x0x0 - 0x00044c_000000_0000, // 1100x0x0 - 0x0006e0_000000_0000, // 1760x0x0 - ], - }); - assert!(result.is_ok()); - - // We expect to emit a query_short_channel_ids message with the received scids - let events = net_graph_msg_handler.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match &events[0] { - MessageSendEvent::SendShortIdsQuery { node_id, msg } => { - assert_eq!(node_id, &node_id_1); - assert_eq!(msg.chain_hash, chain_hash); - assert_eq!(msg.short_channel_ids, vec![ - 0x0003e0_000000_0000, // 992x0x0 - 0x0003e8_000000_0000, // 1000x0x0 - 0x0003e9_000000_0000, // 1001x0x0 - 0x0003f0_000000_0000, // 1008x0x0 - 0x00044c_000000_0000, // 1100x0x0 - 0x0006e0_000000_0000, // 1760x0x0 - ]); - }, - _ => panic!("expected MessageSendEvent::SendShortIdsQuery"), - } - } - } - - #[test] - fn handling_reply_short_channel_ids() { - let network_graph = create_network_graph(); - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); - let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); - - let chain_hash = 
genesis_block(Network::Testnet).header.block_hash(); - - // Test receipt of a successful reply - { - let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd { - chain_hash, - full_information: true, - }); - assert!(result.is_ok()); - } - - // Test receipt of a reply that indicates the peer does not maintain up-to-date information - // for the chain_hash requested in the query. - { - let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd { - chain_hash, - full_information: false, - }); - assert!(result.is_err()); - assert_eq!(result.err().unwrap().err, "Received reply_short_channel_ids_end with no information"); - } } #[test] @@ -2532,7 +2644,7 @@ mod tests { } } -#[cfg(all(test, feature = "unstable"))] +#[cfg(all(test, feature = "_bench_unstable"))] mod benches { use super::*;
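
As a quick illustration of the timestamp arithmetic this patch adds in `peer_connected`, the sketch below (not part of the commit; the helper name is invented for illustration) derives the `first_timestamp` value sent in the `gossip_timestamp_filter`: two weeks in the past when a full sync is wanted, otherwise one hour in the past.

use std::time::{SystemTime, UNIX_EPOCH};

// Hypothetical helper mirroring the logic added in peer_connected() above.
fn gossip_filter_start_time(request_full_sync: bool) -> u32 {
	let mut gossip_start_time = SystemTime::now()
		.duration_since(UNIX_EPOCH)
		.expect("Time must be > 1970")
		.as_secs();
	if request_full_sync {
		gossip_start_time -= 60 * 60 * 24 * 7 * 2; // two weeks ago
	} else {
		gossip_start_time -= 60 * 60; // an hour ago
	}
	// As the patch itself notes, this cast truncates in 2106.
	gossip_start_time as u32
}

Setting `timestamp_range` to `u32::max_value()` alongside this start time, as the patch does, asks the peer both to forward future gossip and to send the (filtered) backlog.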
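Similarly, a minimal usage sketch of the new directional accessors introduced here (`ChannelInfo::as_directed_to` and `EffectiveCapacity::as_msat`); the module path matches this file, and the helper name is again illustrative rather than part of the patch.

use lightning::routing::network_graph::{ChannelInfo, NodeId};

// Hypothetical helper: effective capacity, in msat, of `channel` in the direction
// toward `target`, or None if `target` is not a counterparty of the channel.
fn capacity_toward_msat(channel: &ChannelInfo, target: &NodeId) -> Option<u64> {
	channel.as_directed_to(target)
		.map(|(directed, _source)| directed.effective_capacity().as_msat())
}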