/// starting at the node *after* the provided public key and including batch_amount entries.
/// If None is provided for starting_point, we start at the first node.
fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement>;
+ /// Returns whether a full sync should be requested from the given peer, identified by node_id.
+ fn should_request_full_sync(&self, node_id: &PublicKey) -> bool;
}
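
The trait leaves the policy to the implementor: the Router below caps how many full syncs it has requested in total, but a handler could just as well decide per peer from local graph state. A minimal standalone sketch of such an alternative policy; FullSyncPolicy, GraphTracker, and KNOWN_NODE_THRESHOLD are hypothetical names, not part of this change:

use std::sync::atomic::{AtomicUsize, Ordering};

/// Hypothetical stand-in for RoutingMessageHandler, reduced to the new method.
trait FullSyncPolicy {
    fn should_request_full_sync(&self) -> bool;
}

/// Illustrative handler state: request full syncs only while our graph is small.
struct GraphTracker {
    known_nodes: AtomicUsize,
}

impl FullSyncPolicy for GraphTracker {
    fn should_request_full_sync(&self) -> bool {
        // Hypothetical cutoff; the real Router (further down) counts requests instead.
        const KNOWN_NODE_THRESHOLD: usize = 100;
        self.known_nodes.load(Ordering::Acquire) < KNOWN_NODE_THRESHOLD
    }
}
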
pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref> where CM::Target: msgs::ChannelMessageHandler {
peer_counter_low: AtomicUsize,
peer_counter_high: AtomicUsize,
- initial_syncs_sent: AtomicUsize,
logger: Arc<Logger>,
}
}}
}
-//TODO: Really should do something smarter for this
-const INITIAL_SYNCS_TO_SEND: usize = 5;
-
/// Manages and reacts to connection events. You probably want to use file descriptors as PeerIds.
/// PeerIds may repeat, but only after disconnect_event() has been called.
impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where CM::Target: msgs::ChannelMessageHandler {
ephemeral_key_midstate,
peer_counter_low: AtomicUsize::new(0),
peer_counter_high: AtomicUsize::new(0),
- initial_syncs_sent: AtomicUsize::new(0),
logger,
}
}
peer.their_node_id = Some(their_node_id);
insert_node_id!();
let mut features = InitFeatures::supported();
- if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
- self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
+ if self.message_handler.route_handler.should_request_full_sync(&peer.their_node_id.unwrap()) {
features.set_initial_routing_sync();
}
if !peer.outbound {
let mut features = InitFeatures::supported();
- if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
- self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
+ if self.message_handler.route_handler.should_request_full_sync(&peer.their_node_id.unwrap()) {
features.set_initial_routing_sync();
}
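
Both the outbound and inbound paths now perform the same gating, so the decision of whether to set the initial_routing_sync feature bit lives entirely in the routing handler. A condensed, self-contained sketch of that control flow; FeatureFlags and SyncPolicy are hypothetical stand-ins for InitFeatures and the route handler:

/// Hypothetical stand-in for InitFeatures, reduced to the one relevant bit.
struct FeatureFlags {
    initial_routing_sync: bool,
}

impl FeatureFlags {
    fn supported() -> Self { FeatureFlags { initial_routing_sync: false } }
    fn set_initial_routing_sync(&mut self) { self.initial_routing_sync = true; }
}

/// Hypothetical stand-in for the RoutingMessageHandler side of the decision.
trait SyncPolicy {
    fn should_request_full_sync(&self) -> bool;
}

/// Mirrors the handshake paths above: build the supported feature set, then
/// let the routing handler decide whether to ask the peer for a full dump.
fn build_init_features(policy: &dyn SyncPolicy) -> FeatureFlags {
    let mut features = FeatureFlags::supported();
    if policy.should_request_full_sync() {
        features.set_initial_routing_sync();
    }
    features
}
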
use std::cmp;
use std::sync::{RwLock,Arc};
+use std::sync::atomic::{AtomicUsize, Ordering};
use std::collections::{HashMap,BinaryHeap,BTreeMap};
use std::collections::btree_map::Entry as BtreeEntry;
use std;
pub struct Router {
secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
network_map: RwLock<NetworkMap>,
+ full_syncs_requested: AtomicUsize,
chain_monitor: Arc<ChainWatchInterface>,
logger: Arc<Logger>,
}
Ok(Router {
secp_ctx: Secp256k1::verification_only(),
network_map: RwLock::new(network_map),
+ full_syncs_requested: AtomicUsize::new(0),
chain_monitor: args.chain_monitor,
logger: args.logger,
})
}
impl RoutingMessageHandler for Router {
fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id);
}
result
}
+
+ fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+ //TODO: Determine whether to request a full sync based on the network map.
+ const FULL_SYNCS_TO_REQUEST: usize = 5;
+ if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
+ self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
+ true
+ } else {
+ false
+ }
+ }
}
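
Since RoutingMessageHandler methods take &self, the counter uses an AtomicUsize rather than a mutex. Note that the load and fetch_add above are two separate atomic operations, so concurrent handshakes can race past FULL_SYNCS_TO_REQUEST by a sync or two; for a best-effort cap that overshoot is harmless. If a strict cap were ever wanted, a single read-modify-write via AtomicUsize::fetch_update (stable since Rust 1.45) closes the window, as in this standalone sketch:

use std::sync::atomic::{AtomicUsize, Ordering};

const FULL_SYNCS_TO_REQUEST: usize = 5;

/// Strictly capped variant: the counter is only incremented while still below
/// the limit, and the check-and-increment is one atomic read-modify-write,
/// so concurrent callers cannot overshoot.
fn try_claim_full_sync(counter: &AtomicUsize) -> bool {
    counter
        .fetch_update(Ordering::AcqRel, Ordering::Acquire, |n| {
            if n < FULL_SYNCS_TO_REQUEST { Some(n + 1) } else { None }
        })
        .is_ok()
}
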
#[derive(Eq, PartialEq)]
our_node_id: our_pubkey,
nodes: nodes,
}),
+ full_syncs_requested: AtomicUsize::new(0),
chain_monitor,
logger,
}
fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
Vec::new()
}
+ fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+ // Test handler: always ask peers for a full routing table sync.
+ true
+ }
}
pub struct TestLogger {