X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=4c5e718fed162d66fca028f98392e0f51d23b4d7;hb=3d4735cc0a51800318d8abc1253e4ef5b43e590a;hp=3a7cd4eb985ad3713dd468277cedb59131f9e738;hpb=7a4a29ffe0dbfdd2fe82376024ae680401c74700;p=rust-lightning

diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs
index 3a7cd4eb..4c5e718f 100644
--- a/lightning/src/routing/network_graph.rs
+++ b/lightning/src/routing/network_graph.rs
@@ -29,7 +29,7 @@ use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, Reply
 use ln::msgs;
 use util::ser::{Writeable, Readable, Writer};
 use util::logger::Logger;
-use util::events;
+use util::events::{MessageSendEvent, MessageSendEventsProvider};
 
 use std::{cmp, fmt};
 use std::sync::{RwLock, RwLockReadGuard};
@@ -40,6 +40,10 @@ use std::collections::btree_map::Entry as BtreeEntry;
 use std::ops::Deref;
 use bitcoin::hashes::hex::ToHex;
 
+/// The maximum number of extra bytes which we do not understand in a gossip message before we will
+/// refuse to relay the message.
+const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
+
 /// Represents the network as nodes and channels between them
 #[derive(PartialEq)]
 pub struct NetworkGraph {
@@ -64,7 +68,7 @@ pub struct NetGraphMsgHandler<C: Deref, L: Deref> where C::Target: chain::Access
 	pub network_graph: RwLock<NetworkGraph>,
 	chain_access: Option<C>,
 	full_syncs_requested: AtomicUsize,
-	pending_events: Mutex<Vec<events::MessageSendEvent>>,
+	pending_events: Mutex<Vec<MessageSendEvent>>,
 	logger: L,
 }
 
@@ -105,6 +109,18 @@ impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access
 	pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> {
 		LockedNetworkGraph(self.network_graph.read().unwrap())
 	}
+
+	/// Returns true when a full routing table sync should be performed with a peer.
+	fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+		//TODO: Determine whether to request a full sync based on the network map.
+		const FULL_SYNCS_TO_REQUEST: usize = 5;
+		if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
+			self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
+			true
+		} else {
+			false
+		}
+	}
 }
 
 impl<'a> LockedNetworkGraph<'a> {
@@ -127,13 +143,15 @@ macro_rules! secp_verify_sig {
 impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
 	fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
 		self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?;
-		Ok(msg.contents.excess_data.is_empty() && msg.contents.excess_address_data.is_empty())
+		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+			msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+			msg.contents.excess_data.len() + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
 	}
 
 	fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
 		self.network_graph.write().unwrap().update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?;
 		log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" });
} else { "" }); - Ok(msg.contents.excess_data.is_empty()) + Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) { @@ -152,7 +170,7 @@ impl RoutingMessageHandler for N fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result { self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx)?; - Ok(msg.contents.excess_data.is_empty()) + Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { @@ -207,34 +225,32 @@ impl RoutingMessageHandler for N result } - fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { - //TODO: Determine whether to request a full sync based on the network map. - const FULL_SYNCS_TO_REQUEST: usize = 5; - if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST { - self.full_syncs_requested.fetch_add(1, Ordering::AcqRel); - true - } else { - false - } - } - /// Initiates a stateless sync of routing gossip information with a peer - /// by calling query_channel_range. The default strategy used by this - /// implementation is to sync for the full block range with several peers. + /// using gossip_queries. The default strategy used by this implementation + /// is to sync the full block range with several peers. + /// /// We should expect one or more reply_channel_range messages in response - /// to our query. Each reply will enqueue a query_scid message to request - /// gossip messages for each channel. The sync is considered complete when - /// the final reply_scids_end message is received, though we are not + /// to our query_channel_range. Each reply will enqueue a query_scid message + /// to request gossip messages for each channel. The sync is considered complete + /// when the final reply_scids_end message is received, though we are not /// tracking this directly. fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) { + + // We will only perform a sync with peers that support gossip_queries. if !init_msg.features.supports_gossip_queries() { return (); } + + // Check if we need to perform a full synchronization with this peer + if !self.should_request_full_sync(their_node_id) { + return (); + } + let first_blocknum = 0; let number_of_blocks = 0xffffffff; log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks); let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::MessageSendEvent::SendChannelRangeQuery { + pending_events.push(MessageSendEvent::SendChannelRangeQuery { node_id: their_node_id.clone(), msg: QueryChannelRange { chain_hash: self.network_graph.read().unwrap().genesis_hash, @@ -249,24 +265,16 @@ impl RoutingMessageHandler for N /// stateless, it does not validate the sequencing of replies for multi- /// reply ranges. It does not validate whether the reply(ies) cover the /// queried range. It also does not filter SCIDs to only those in the - /// original query range. + /// original query range. We also do not validate that the chain_hash + /// matches the chain_hash of the NetworkGraph. Any chan_ann message that + /// does not match our chain_hash will be rejected when the announcement is + /// processed. 
 	fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> {
-		log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, full_information={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.full_information, msg.short_channel_ids.len(),);
-
-		// Validate that the remote node maintains up-to-date channel
-		// information for chain_hash. Some nodes use the full_information
-		// flag to indicate multi-part messages so we must check whether
-		// we received SCIDs as well.
-		if !msg.full_information && msg.short_channel_ids.len() == 0 {
-			return Err(LightningError {
-				err: String::from("Received reply_channel_range with no information available"),
-				action: ErrorAction::IgnoreError,
-			});
-		}
+		log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),);
 
 		log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
 		let mut pending_events = self.pending_events.lock().unwrap();
-		pending_events.push(events::MessageSendEvent::SendShortIdsQuery {
+		pending_events.push(MessageSendEvent::SendShortIdsQuery {
 			node_id: their_node_id.clone(),
 			msg: QueryShortChannelIds {
 				chain_hash: msg.chain_hash,
@@ -314,12 +322,12 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
 	}
 }
 
-impl<C: Deref + Sync + Send, L: Deref + Sync + Send> events::MessageSendEventsProvider for NetGraphMsgHandler<C, L>
+impl<C: Deref + Sync + Send, L: Deref + Sync + Send> MessageSendEventsProvider for NetGraphMsgHandler<C, L>
 where
 	C::Target: chain::Access,
 	L::Target: Logger,
 {
-	fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
 		std::mem::swap(&mut ret, &mut pending_events);
@@ -327,7 +335,7 @@ where
 	}
 }
 
-#[derive(PartialEq, Debug)]
+#[derive(Clone, PartialEq, Debug)]
 /// Details about one direction of a channel. Received
 /// within a channel update.
 pub struct DirectionalChannelInfo {
@@ -439,7 +447,7 @@ impl Writeable for RoutingFees {
 	}
 }
 
-#[derive(PartialEq, Debug)]
+#[derive(Clone, PartialEq, Debug)]
 /// Information received in the latest node_announcement from this node.
 pub struct NodeAnnouncementInfo {
 	/// Protocol features the node announced support for
@@ -505,7 +513,7 @@ impl Readable for NodeAnnouncementInfo {
 	}
 }
 
-#[derive(PartialEq)]
+#[derive(Clone, PartialEq)]
 /// Details about a node in the network, known from the network announcement.
 pub struct NodeInfo {
 	/// All valid channels a node has announced
@@ -678,7 +686,10 @@ impl NetworkGraph {
 			}
 		}
 
-		let should_relay = msg.excess_data.is_empty() && msg.excess_address_data.is_empty();
+		let should_relay =
+			msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+			msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+			msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY;
 		node.announcement_info = Some(NodeAnnouncementInfo {
 			features: msg.features.clone(),
 			last_update: msg.timestamp,
@@ -771,7 +782,8 @@ impl NetworkGraph {
 				node_two: msg.node_id_2.clone(),
 				two_to_one: None,
 				capacity_sats: utxo_value,
-				announcement_message: if msg.excess_data.is_empty() { full_msg.cloned() } else { None },
+				announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+					{ full_msg.cloned() } else { None },
 			};
 
 			match self.channels.entry(msg.short_channel_id) {
@@ -900,7 +912,8 @@ impl NetworkGraph {
 			chan_was_enabled = false;
 		}
 
-		let last_update_message = if msg.excess_data.is_empty() { full_msg.cloned() } else { None };
+		let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+			{ full_msg.cloned() } else { None };
 
 		let updated_channel_dir_info = DirectionalChannelInfo {
 			enabled: chan_enabled,
@@ -1000,7 +1013,7 @@ impl NetworkGraph {
 mod tests {
 	use chain;
 	use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
-	use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
+	use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, MAX_EXCESS_BYTES_FOR_RELAY};
 	use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
 		UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate,
 		ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
@@ -1122,7 +1135,7 @@ mod tests {
 		};
 		unsigned_announcement.timestamp += 1000;
-		unsigned_announcement.excess_data.push(1);
+		unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
 		msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
 		let announcement_with_data = NodeAnnouncement {
 			signature: secp_ctx.sign(&msghash, node_1_privkey),
@@ -1290,7 +1303,7 @@ mod tests {
 
 		// Don't relay valid channels with excess data
 		unsigned_announcement.short_channel_id += 1;
-		unsigned_announcement.excess_data.push(1);
+		unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
 		msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
 		let valid_announcement = ChannelAnnouncement {
 			node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
@@ -1420,7 +1433,7 @@ mod tests {
 		}
 
 		unsigned_channel_update.timestamp += 100;
-		unsigned_channel_update.excess_data.push(1);
+		unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
 		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
 		let valid_channel_update = ChannelUpdate {
 			signature: secp_ctx.sign(&msghash, node_1_privkey),
@@ -1720,7 +1733,7 @@ mod tests {
 			htlc_maximum_msat: OptionalField::Absent,
 			fee_base_msat: 10000,
 			fee_proportional_millionths: 20,
-			excess_data: [1; 3].to_vec()
+			excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec()
 		};
 		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
 		let valid_channel_update = ChannelUpdate {
@@ -1849,7 +1862,7 @@ mod tests {
 			alias: [0; 32],
 			addresses: Vec::new(),
 			excess_address_data: Vec::new(),
-			excess_data: [1; 3].to_vec(),
+			excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(),
 		};
 		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
 		let valid_announcement = NodeAnnouncement {
@@ -1967,6 +1980,26 @@ mod tests {
 				_ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
 			};
 		}
+
+		// It should not enqueue a query when should_request_full_sync returns false.
+		// The initial implementation allows syncing with the first 5 peers, after
+		// which should_request_full_sync will return false.
+		{
+			let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+			let init_msg = Init { features: InitFeatures::known() };
+			for n in 1..7 {
+				let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap();
+				let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
+				net_graph_msg_handler.sync_routing_table(&node_id, &init_msg);
+				let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+				if n <= 5 {
+					assert_eq!(events.len(), 1);
+				} else {
+					assert_eq!(events.len(), 0);
+				}
+
+			}
+		}
 	}
 
 	#[test]
@@ -1980,10 +2013,9 @@ mod tests {
 		// Test receipt of a single reply that should enqueue an SCID query
 		// matching the SCIDs in the reply
 		{
-			// Handle a single successful reply that encompasses the queried channel range
 			let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
 				chain_hash,
-				full_information: true,
+				sync_complete: true,
 				first_blocknum: 0,
 				number_of_blocks: 2000,
 				short_channel_ids: vec![
@@ -2016,22 +2048,6 @@ mod tests {
 				_ => panic!("expected MessageSendEvent::SendShortIdsQuery"),
 			}
 		}
-
-		// Test receipt of a reply that indicates the remote node does not maintain up-to-date
-		// information for the chain_hash. Because of discrepancies in implementation we use
-		// full_information=false and short_channel_ids=[] as the signal.
-		{
-			// Handle the reply indicating the peer was unable to fulfill our request.
-			let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
-				chain_hash,
-				full_information: false,
-				first_blocknum: 1000,
-				number_of_blocks: 100,
-				short_channel_ids: vec![],
-			});
-			assert!(result.is_err());
-			assert_eq!(result.err().unwrap().err, "Received reply_channel_range with no information available");
-		}
 	}
 
 	#[test]
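
The relay-gating rule this diff introduces can be summarized as: gossip messages carrying unknown (excess) fields are still accepted into the local network graph, but they are only relayed onward if each excess field, and the two fields combined, stay within MAX_EXCESS_BYTES_FOR_RELAY (1024 bytes); channel_announcement and channel_update apply the same bound to their single excess_data field. Below is a minimal standalone sketch of that predicate, not code from the patch; the free function should_relay_node_announcement and the main() driver are illustrative assumptions only.

// Standalone sketch of the relay gate; the helper name and free-function form
// are illustrative and not part of rust-lightning.

/// Mirrors MAX_EXCESS_BYTES_FOR_RELAY from the patch: messages with more
/// uninterpreted bytes than this are stored locally but not relayed.
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;

/// Hypothetical helper: returns true when a node_announcement with the given
/// excess fields may be relayed to peers, following handle_node_announcement's check.
fn should_relay_node_announcement(excess_data: &[u8], excess_address_data: &[u8]) -> bool {
	excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
		excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
		excess_data.len() + excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
}

fn main() {
	// A few unknown bytes no longer block relay (the old rule required both
	// excess fields to be empty)...
	assert!(should_relay_node_announcement(&[1, 2, 3], &[]));
	// ...but exceeding the 1024-byte budget, individually or combined, does.
	assert!(!should_relay_node_announcement(&[0u8; 1025], &[]));
	assert!(!should_relay_node_announcement(&[0u8; 600], &[0u8; 600]));
}

The other behavioural changes in the diff sit alongside this rule: sync_routing_table now only enqueues a query_channel_range for the first five gossip_queries peers (via the now-private should_request_full_sync), and reply_channel_range replies are no longer rejected when they carry no SCIDs, with the full_information flag renamed to sync_complete.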