From d3fb619020df91959d2c20bdf5ef76d1d63f9778 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Mon, 10 Feb 2020 11:13:41 -0800
Subject: [PATCH] Move initial_routing_sync decision to the Router

PeerManager determines whether the initial_routing_sync feature bit
should be set when sending Init messages to peers. Move this to the
Router as it is better able to determine if a full sync is needed.
---
 lightning/src/ln/msgs.rs         |  2 ++
 lightning/src/ln/peer_handler.rs | 11 ++---------
 lightning/src/ln/router.rs       | 16 ++++++++++++++++
 lightning/src/util/test_utils.rs |  3 +++
 4 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 483d69e7..519c5654 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -604,6 +604,8 @@ pub trait RoutingMessageHandler : Send + Sync {
 	/// starting at the node *after* the provided publickey and including batch_amount entries.
 	/// If None is provided for starting_point, we start at the first node.
 	fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement>;
+	/// Returns whether a full sync should be requested from a peer.
+	fn should_request_full_sync(&self, node_id: &PublicKey) -> bool;
 }
 
 pub(crate) struct OnionRealm0HopData {
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index e25e50c2..2e7ce4e2 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -189,7 +189,6 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref> where CM::Target
 	peer_counter_low: AtomicUsize,
 	peer_counter_high: AtomicUsize,
 
-	initial_syncs_sent: AtomicUsize,
 	logger: Arc<Logger>,
 }
 
@@ -212,9 +211,6 @@ macro_rules! encode_msg {
 	}}
 }
 
-//TODO: Really should do something smarter for this
-const INITIAL_SYNCS_TO_SEND: usize = 5;
-
 /// Manages and reacts to connection events. You probably want to use file descriptors as PeerIds.
 /// PeerIds may repeat, but only after disconnect_event() has been called.
 impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where CM::Target: msgs::ChannelMessageHandler {
@@ -236,7 +232,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
 			ephemeral_key_midstate,
 			peer_counter_low: AtomicUsize::new(0),
 			peer_counter_high: AtomicUsize::new(0),
-			initial_syncs_sent: AtomicUsize::new(0),
 			logger,
 		}
 	}
@@ -580,8 +575,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
 								peer.their_node_id = Some(their_node_id);
 								insert_node_id!();
 								let mut features = InitFeatures::supported();
-								if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
-									self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
+								if self.message_handler.route_handler.should_request_full_sync(&peer.their_node_id.unwrap()) {
 									features.set_initial_routing_sync();
 								}
 
@@ -652,8 +646,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
 
 								if !peer.outbound {
 									let mut features = InitFeatures::supported();
-									if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
-										self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
+									if self.message_handler.route_handler.should_request_full_sync(&peer.their_node_id.unwrap()) {
 										features.set_initial_routing_sync();
 									}
 
diff --git a/lightning/src/ln/router.rs b/lightning/src/ln/router.rs
index 00256560..1791bb6f 100644
--- a/lightning/src/ln/router.rs
+++ b/lightning/src/ln/router.rs
@@ -22,6 +22,7 @@ use util::logger::Logger;
 
 use std::cmp;
 use std::sync::{RwLock,Arc};
+use std::sync::atomic::{AtomicUsize, Ordering};
 use std::collections::{HashMap,BinaryHeap,BTreeMap};
 use std::collections::btree_map::Entry as BtreeEntry;
 use std;
@@ -347,6 +348,7 @@ pub struct RouteHint {
 pub struct Router {
 	secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
 	network_map: RwLock<NetworkMap>,
+	full_syncs_requested: AtomicUsize,
 	chain_monitor: Arc<ChainWatchInterface>,
 	logger: Arc<Logger>,
 }
@@ -390,6 +392,7 @@ impl ReadableArgs<RouterReadArgs> for Router {
 		Ok(Router {
 			secp_ctx: Secp256k1::verification_only(),
 			network_map: RwLock::new(network_map),
+			full_syncs_requested: AtomicUsize::new(0),
 			chain_monitor: args.chain_monitor,
 			logger: args.logger,
 		})
@@ -406,6 +409,7 @@ macro_rules! secp_verify_sig {
 }
 
 impl RoutingMessageHandler for Router {
+
 	fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
 		let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
 		secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id);
@@ -698,6 +702,17 @@ impl RoutingMessageHandler for Router {
 		}
 		result
 	}
+
+	fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+		//TODO: Determine whether to request a full sync based on the network map.
+		const FULL_SYNCS_TO_REQUEST: usize = 5;
+		if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
+			self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
+			true
+		} else {
+			false
+		}
+	}
 }
 
 #[derive(Eq, PartialEq)]
@@ -750,6 +765,7 @@ impl Router {
 				our_node_id: our_pubkey,
 				nodes: nodes,
 			}),
+			full_syncs_requested: AtomicUsize::new(0),
 			chain_monitor,
 			logger,
 		}
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index aa68fdb1..cd2064a4 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -155,6 +155,9 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
 	fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
 		Vec::new()
 	}
+	fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+		true
+	}
 }
 
 pub struct TestLogger {
-- 
2.30.2
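
A note on the TODO left in Router::should_request_full_sync: one way to make the decision "based on the network map" would be to stop asking peers for full syncs once the local graph view is already large, and otherwise fall back to the bounded counter this patch introduces. The following is a rough, self-contained sketch of that idea only; the HypotheticalGraphView type, its fields, and the node-count threshold are assumptions made for illustration and are not part of this patch or of rust-lightning.

use std::collections::BTreeMap;
use std::sync::RwLock;
use std::sync::atomic::{AtomicUsize, Ordering};

struct HypotheticalGraphView {
	// Known nodes, keyed by serialized node id (a stand-in for the Router's NetworkMap).
	nodes: RwLock<BTreeMap<Vec<u8>, ()>>,
	full_syncs_requested: AtomicUsize,
}

impl HypotheticalGraphView {
	fn should_request_full_sync(&self) -> bool {
		const FULL_SYNCS_TO_REQUEST: usize = 5;
		const MIN_KNOWN_NODES: usize = 1_000; // assumed threshold, purely illustrative
		// Once the local view of the graph is reasonably large, skip further full syncs.
		if self.nodes.read().unwrap().len() >= MIN_KNOWN_NODES {
			return false;
		}
		// Otherwise fall back to the bounded counter used by Router in this patch.
		if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
			self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
			true
		} else {
			false
		}
	}
}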