Add PeerManager::disconnect_by_node_id()
diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs
index c1258ce37a2b1a6406dc53b5dea00b55401aff23..8075462c938b797d0f8d923d786be9239533317c 100644
--- a/lightning/src/routing/network_graph.rs
+++ b/lightning/src/routing/network_graph.rs
@@ -23,7 +23,7 @@ use bitcoin::hash_types::BlockHash;
 use chain;
 use chain::Access;
 use ln::features::{ChannelFeatures, NodeFeatures};
-use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
+use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
 use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
 use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use ln::msgs;
@@ -105,6 +105,18 @@ impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access
        pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> {
                LockedNetworkGraph(self.network_graph.read().unwrap())
        }
+
+       /// Returns true when a full routing table sync should be performed with a peer.
+       fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+               //TODO: Determine whether to request a full sync based on the network map.
+               const FULL_SYNCS_TO_REQUEST: usize = 5;
+               if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
+                       self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
+                       true
+               } else {
+                       false
+               }
+       }
 }
 
 impl<'a> LockedNetworkGraph<'a> {
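Moving should_request_full_sync out of the RoutingMessageHandler trait (next hunk) and into the inherent impl keeps the trait surface smaller while preserving the counting behavior. A minimal standalone sketch of the same compare-and-count pattern, using only std atomics (the SyncLimiter type is illustrative, not part of rust-lightning):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Illustrative stand-in for the handler's counter; not the actual
/// NetGraphMsgHandler type.
struct SyncLimiter {
    full_syncs_requested: AtomicUsize,
}

impl SyncLimiter {
    const FULL_SYNCS_TO_REQUEST: usize = 5;

    /// Returns true for the first five callers, false afterwards.
    /// Note the load/fetch_add pair is not one atomic step, so two
    /// racing callers could both observe 4 and both increment; that is
    /// acceptable here because the limit is only a heuristic.
    fn should_request_full_sync(&self) -> bool {
        if self.full_syncs_requested.load(Ordering::Acquire) < Self::FULL_SYNCS_TO_REQUEST {
            self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
            true
        } else {
            false
        }
    }
}
```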
@@ -207,26 +219,27 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                result
        }
 
-       fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
-               //TODO: Determine whether to request a full sync based on the network map.
-               const FULL_SYNCS_TO_REQUEST: usize = 5;
-               if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
-                       self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
-                       true
-               } else {
-                       false
-               }
-       }
-
        /// Initiates a stateless sync of routing gossip information with a peer
-       /// by calling query_channel_range. The default strategy used by this
-       /// implementation is to sync for the full block range with several peers.
+       /// using gossip_queries. The default strategy used by this implementation
+       /// is to sync the full block range with several peers.
+       ///
        /// We should expect one or more reply_channel_range messages in response
-       /// to our query. Each reply will enqueue a query_scid message to request
-       /// gossip messages for each channel. The sync is considered complete when
-       /// the final reply_scids_end message is received, though we are not
+       /// to our query_channel_range. Each reply will enqueue a query_scid message
+       /// to request gossip messages for each channel. The sync is considered complete
+       /// when the final reply_scids_end message is received, though we are not
        /// tracking this directly.
-       fn sync_routing_table(&self, their_node_id: &PublicKey) {
+       fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) {
+               // We will only perform a sync with peers that support gossip_queries.
+               if !init_msg.features.supports_gossip_queries() {
+                       return;
+               }
+
+               // Check if we need to perform a full synchronization with this peer
+               if !self.should_request_full_sync(their_node_id) {
+                       return;
+               }
+
                let first_blocknum = 0;
                let number_of_blocks = 0xffffffff;
                log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
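With the Init parameter added, the routing handler can only be asked to sync once the peer's init message has been exchanged. A hedged sketch of the call-site glue under that assumption (on_peer_connected is hypothetical; sync_routing_table and Init are from this patch):

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::{Init, RoutingMessageHandler};

/// Hypothetical glue: once a peer's init message has been received,
/// hand it to the routing handler so it can decide whether to sync.
fn on_peer_connected<RM: RoutingMessageHandler>(
    handler: &RM,
    their_node_id: &PublicKey,
    init_msg: &Init,
) {
    // sync_routing_table itself checks supports_gossip_queries() and
    // the full-sync budget, so the caller can invoke it unconditionally.
    handler.sync_routing_table(their_node_id, init_msg);
}
```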
@@ -246,10 +259,11 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
        /// stateless, it does not validate the sequencing of replies for multi-
        /// reply ranges. It does not validate whether the reply(ies) cover the
        /// queried range. It also does not filter SCIDs to only those in the
-       /// original query range. In the event of a failure, we may have received
-       /// some channel information. Before trying with another peer, the
-       /// caller should update its set of SCIDs that need to be queried.
-       fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: &ReplyChannelRange) -> Result<(), LightningError> {
+       /// original query range. We also do not validate that the chain_hash
+       /// matches the chain_hash of the NetworkGraph. Any chan_ann message that
+       /// does not match our chain_hash will be rejected when the announcement is
+       /// processed.
+       fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> {
                log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, full_information={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.full_information, msg.short_channel_ids.len(),);
 
                // Validate that the remote node maintains up-to-date channel
@@ -263,20 +277,13 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                        });
                }
 
-               // Copy the SCIDs into a new vector to be sent in the SCID query
-               let scid_size = msg.short_channel_ids.len();
-               let mut short_channel_ids: Vec<u64> = Vec::with_capacity(scid_size);
-               for scid in msg.short_channel_ids.iter() {
-                       short_channel_ids.push(scid.clone());
-               }
-
-               log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), scid_size);
+               log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
                let mut pending_events = self.pending_events.lock().unwrap();
                pending_events.push(events::MessageSendEvent::SendShortIdsQuery {
                        node_id: their_node_id.clone(),
                        msg: QueryShortChannelIds {
-                               chain_hash: msg.chain_hash.clone(),
-                               short_channel_ids,
+                               chain_hash: msg.chain_hash,
+                               short_channel_ids: msg.short_channel_ids,
                        }
                });
 
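Because handle_reply_channel_range now takes ReplyChannelRange by value, the received short_channel_ids vector is moved straight into the outgoing QueryShortChannelIds, eliminating the element-by-element copy removed above. Each of those u64 SCIDs packs its channel's funding location per BOLT 7; a small illustrative sketch of that encoding (helper names are not from this codebase):

```rust
/// Pack a short_channel_id from its components, per BOLT 7:
/// 3 bytes block height | 3 bytes tx index | 2 bytes output index.
fn scid_from_parts(block: u32, tx_index: u32, vout: u16) -> u64 {
    ((block as u64) << 40) | ((tx_index as u64) << 16) | (vout as u64)
}

/// Split a short_channel_id back into (block, tx_index, vout).
fn scid_to_parts(scid: u64) -> (u32, u32, u16) {
    (
        (scid >> 40) as u32,
        ((scid >> 16) & 0xff_ffff) as u32,
        (scid & 0xffff) as u16,
    )
}
```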
@@ -287,7 +294,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
        /// gossip messages. In the event of a failure, we may have received
        /// some channel information. Before trying with another peer, the
        /// caller should update its set of SCIDs that need to be queried.
-       fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: &ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
+       fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
                log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information);
 
                // If the remote node does not have up-to-date information for the
@@ -303,10 +310,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                Ok(())
        }
 
-       /// There are potential DoS vectors when handling inbound queries.
-       /// Handling requests with first_blocknum very far away may trigger repeated
-       /// disk I/O if the NetworkGraph is not fully in-memory.
-       fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: &QueryChannelRange) -> Result<(), LightningError> {
+       fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> {
                // TODO
                Err(LightningError {
                        err: String::from("Not implemented"),
@@ -314,10 +318,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                })
        }
 
-       /// There are potential DoS vectors when handling inbound queries.
-       /// Handling requests with first_blocknum very far away may trigger repeated
-       /// disk I/O if the NetworkGraph is not fully in-memory.
-       fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: &QueryShortChannelIds) -> Result<(), LightningError> {
+       fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> {
                // TODO
                Err(LightningError {
                        err: String::from("Not implemented"),
@@ -1011,9 +1012,9 @@ impl NetworkGraph {
 #[cfg(test)]
 mod tests {
        use chain;
-       use ln::features::{ChannelFeatures, NodeFeatures};
+       use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
        use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
-       use ln::msgs::{OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
+       use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
                UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate,
                ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
        use util::test_utils;
@@ -1954,20 +1955,51 @@ mod tests {
                let chain_hash = genesis_block(Network::Testnet).header.block_hash();
                let first_blocknum = 0;
                let number_of_blocks = 0xffff_ffff;
-               net_graph_msg_handler.sync_routing_table(&node_id_1);
+
+               // It should ignore the sync request if the gossip_queries feature is not enabled
+               {
+                       let init_msg = Init { features: InitFeatures::known().clear_gossip_queries() };
+                       net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg);
+                       let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+                       assert_eq!(events.len(), 0);
+               }
 
                // It should send a query_channel_range message with the correct information
-               let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), 1);
-               match &events[0] {
-                       MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => {
-                               assert_eq!(node_id, &node_id_1);
-                               assert_eq!(msg.chain_hash, chain_hash);
-                               assert_eq!(msg.first_blocknum, first_blocknum);
-                               assert_eq!(msg.number_of_blocks, number_of_blocks);
-                       },
-                       _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
-               };
+               {
+                       let init_msg = Init { features: InitFeatures::known() };
+                       net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg);
+                       let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+                       assert_eq!(events.len(), 1);
+                       match &events[0] {
+                               MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => {
+                                       assert_eq!(node_id, &node_id_1);
+                                       assert_eq!(msg.chain_hash, chain_hash);
+                                       assert_eq!(msg.first_blocknum, first_blocknum);
+                                       assert_eq!(msg.number_of_blocks, number_of_blocks);
+                               },
+                               _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
+                       };
+               }
+
+               // It should not enqueue a query when should_request_full_sync returns false.
+               // The initial implementation allows syncing with the first 5 peers,
+               // after which should_request_full_sync will return false.
+               {
+                       let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+                       let init_msg = Init { features: InitFeatures::known() };
+                       for n in 1..7 {
+                               let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap();
+                               let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
+                               net_graph_msg_handler.sync_routing_table(&node_id, &init_msg);
+                               let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+                               if n <= 5 {
+                                       assert_eq!(events.len(), 1);
+                               } else {
+                                       assert_eq!(events.len(), 0);
+                               }
+                       }
+               }
        }
 
        #[test]
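The loop above fabricates six distinct peers by seeding each secret key with a repeated byte, letting the test cross the five-sync budget. The same trick in isolation (test-only key generation, not how production node keys are derived):

```rust
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};

fn main() {
    let secp_ctx = Secp256k1::new();
    // Bytes 1..=6 each yield a valid, distinct secp256k1 scalar;
    // an all-zero seed would be rejected by from_slice.
    for n in 1u8..7 {
        let node_privkey = SecretKey::from_slice(&[n; 32]).unwrap();
        let node_id = PublicKey::from_secret_key(&secp_ctx, &node_privkey);
        println!("peer {}: {}", n, node_id);
    }
}
```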
@@ -1981,8 +2013,7 @@ mod tests {
                // Test receipt of a single reply that should enqueue an SCID query
                // matching the SCIDs in the reply
                {
-                       // Handle a single successful reply that encompasses the queried channel range
-                       let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, &ReplyChannelRange {
+                       let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
                                chain_hash,
                                full_information: true,
                                first_blocknum: 0,
@@ -2023,7 +2054,7 @@ mod tests {
                // full_information=false and short_channel_ids=[] as the signal.
                {
                        // Handle the reply indicating the peer was unable to fulfill our request.
-                       let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, &ReplyChannelRange {
+                       let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
                                chain_hash,
                                full_information: false,
                                first_blocknum: 1000,
@@ -2045,7 +2076,7 @@ mod tests {
 
                // Test receipt of a successful reply
                {
-                       let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, &ReplyShortChannelIdsEnd {
+                       let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
                                chain_hash,
                                full_information: true,
                        });
@@ -2055,7 +2086,7 @@ mod tests {
                // Test receipt of a reply that indicates the peer does not maintain up-to-date information
                // for the chain_hash requested in the query.
                {
-                       let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, &ReplyShortChannelIdsEnd {
+                       let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
                                chain_hash,
                                full_information: false,
                        });
@@ -2072,7 +2103,7 @@ mod tests {
 
                let chain_hash = genesis_block(Network::Testnet).header.block_hash();
 
-               let result = net_graph_msg_handler.handle_query_channel_range(&node_id, &QueryChannelRange {
+               let result = net_graph_msg_handler.handle_query_channel_range(&node_id, QueryChannelRange {
                        chain_hash,
                        first_blocknum: 0,
                        number_of_blocks: 0xffff_ffff,
@@ -2088,7 +2119,7 @@ mod tests {
 
                let chain_hash = genesis_block(Network::Testnet).header.block_hash();
 
-               let result = net_graph_msg_handler.handle_query_short_channel_ids(&node_id, &QueryShortChannelIds {
+               let result = net_graph_msg_handler.handle_query_short_channel_ids(&node_id, QueryShortChannelIds {
                        chain_hash,
                        short_channel_ids: vec![0x0003e8_000000_0000],
                });
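For reference, the SCID literal in this query decodes, per the BOLT 7 layout sketched earlier, to block 1000 (0x0003e8), transaction index 0, output index 0. A self-contained check (illustrative only):

```rust
fn main() {
    let scid: u64 = 0x0003e8_000000_0000;
    let block = (scid >> 40) as u32;                  // 1000
    let tx_index = ((scid >> 16) & 0xff_ffff) as u32; // 0
    let vout = (scid & 0xffff) as u16;                // 0
    assert_eq!((block, tx_index, vout), (1000, 0, 0));
}
```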