//! Instead of actually servicing sockets ourselves we require that you implement the
//! SocketDescriptor interface and use that to receive actions which you should perform on the
//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
-//! call into the provided message handlers (probably a ChannelManager and NetGraphmsgHandler) with messages
-//! they should handle, and encoding/sending response messages.
+//! call into the provided message handlers (probably a ChannelManager and P2PGossipSync) with
+//! messages they should handle, and encoding/sending response messages.
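For illustration, here is a minimal sketch of the `SocketDescriptor` side of this contract. The `id`/`outbound` plumbing is hypothetical; only the trait shape (`send_data`/`disconnect_socket`, plus the `Clone + Eq + Hash` bounds) comes from this module.

```rust
use std::collections::VecDeque;
use std::hash::{Hash, Hasher};
use std::sync::{Arc, Mutex};
use lightning::ln::peer_handler::SocketDescriptor;

#[derive(Clone)]
struct MyDescriptor {
	id: u64, // identifies the underlying socket to the I/O layer
	outbound: Arc<Mutex<VecDeque<Vec<u8>>>>, // bytes the I/O loop still has to write
}

impl PartialEq for MyDescriptor { fn eq(&self, other: &Self) -> bool { self.id == other.id } }
impl Eq for MyDescriptor {}
impl Hash for MyDescriptor { fn hash<H: Hasher>(&self, hasher: &mut H) { self.id.hash(hasher) } }

impl SocketDescriptor for MyDescriptor {
	fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
		// Queue everything; a real implementation writes to the socket, may
		// accept fewer bytes (returning how many it took), and resumes
		// passing read data to read_event when resume_read is set.
		self.outbound.lock().unwrap().push_back(data.to_vec());
		data.len()
	}
	fn disconnect_socket(&mut self) {
		// Tell the I/O layer to close socket `self.id` and stop calling
		// read_event for this descriptor.
	}
}
```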
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
use ln::features::InitFeatures;
use ln::msgs;
use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
use ln::wire;
use ln::wire::Encode;
+use routing::gossip::{NetworkGraph, P2PGossipSync};
use util::atomic_counter::AtomicCounter;
use util::events::{MessageSendEvent, MessageSendEventsProvider};
use util::logger::Logger;
-use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
use prelude::*;
use io;
fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
- fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
- Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
- fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
+ fn get_next_channel_announcement(&self, _starting_point: u64) ->
+ Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
+ fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<msgs::NodeAnnouncement> { None }
fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
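Since the getters above now page a single item per call rather than a `_batch_amount` batch, a custom `RoutingMessageHandler` only needs an ordered lookup. A hedged sketch over a hypothetical `BTreeMap`-backed store (not LDK's actual `NetworkGraph` internals):

```rust
use std::collections::BTreeMap;
use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate};

struct GossipStore {
	// short_channel_id -> (announcement, directional updates), ordered by key
	channels: BTreeMap<u64, (ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)>,
}

impl GossipStore {
	/// Return the first entry at or after `starting_point`, or None when the
	/// caller has seen everything. Callers advance past the returned
	/// short_channel_id themselves (see do_attempt_write_data below, which
	/// steps to `short_channel_id + 1`).
	fn get_next_channel_announcement(&self, starting_point: u64)
		-> Option<&(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)>
	{
		self.channels.range(starting_point..).next().map(|(_, entry)| entry)
	}
}
```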
fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
- fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+ fn handle_channel_ready(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReady) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_shutdown(&self, their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) {
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub chan_handler: CM,
/// A message handler which handles messages updating our knowledge of the network channel
- /// graph. Usually this is just a [`NetGraphMsgHandler`] object or an
- /// [`IgnoringMessageHandler`].
+ /// graph. Usually this is just a [`P2PGossipSync`] object or an [`IgnoringMessageHandler`].
///
- /// [`NetGraphMsgHandler`]: crate::routing::network_graph::NetGraphMsgHandler
+ /// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
pub route_handler: RM,
}
/// descriptor.
#[derive(Clone)]
pub struct PeerHandleError {
- /// Used to indicate that we probably can't make any future connections to this peer, implying
- /// we should go ahead and force-close any channels we have with it.
+ /// Used to indicate that we probably can't make any future connections to this peer (e.g.
+ /// because we required features that our peer was missing, or vice versa).
+ ///
+ /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+ /// any channels with this peer or check for new versions of LDK.
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub no_connection_possible: bool,
}
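A hedged sketch of honoring this contract when `read_event` fails; the generic bounds mirror `PeerManager`'s, and `note_peer_unusable` is a hypothetical application hook (LDK will not force-close for you):

```rust
use std::ops::Deref;
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerHandleError, PeerManager, SocketDescriptor};
use lightning::util::logger::Logger;

// Hypothetical application hook: e.g. force-close channels with the peer or
// prompt the user to upgrade.
fn note_peer_unusable() {}

fn on_bytes_read<D, CM, RM, L, CMH>(pm: &PeerManager<D, CM, RM, L, CMH>, descriptor: &mut D, data: &[u8])
where
	D: SocketDescriptor,
	CM: Deref, CM::Target: ChannelMessageHandler,
	RM: Deref, RM::Target: RoutingMessageHandler,
	L: Deref, L::Target: Logger,
	CMH: Deref, CMH::Target: CustomMessageHandler,
{
	match pm.read_event(descriptor, data) {
		Ok(pause_read) => {
			if pause_read {
				// Outbound buffer backed up: stop feeding read_event until
				// send_data is called with resume_read set.
			}
			pm.process_events();
		},
		Err(PeerHandleError { no_connection_possible }) => {
			// The connection must be dropped immediately.
			descriptor.disconnect_socket();
			if no_connection_possible {
				// Reconnecting won't help (e.g. missing required features).
				note_peer_unusable();
			}
		},
	}
}
```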
impl fmt::Debug for PeerHandleError {
/// tick. Once we have sent this many messages since the last ping, we send a ping right away to
/// ensure we don't just fill up our send buffer and leave the peer with too many messages to
/// process before the next ping.
+///
+/// Note that we continue responding to other messages even after we've sent this many messages, so
+/// it's more of a general guideline used for gossip backfill (and gossip forwarding, times
+/// [`FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO`]) than a hard limit.
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
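To make the guideline concrete: assuming `FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO` is 2 (its value is defined elsewhere in this file and assumed here), forwarded gossip tolerates twice the backfill budget:

```rust
fn main() {
	const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
	const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2; // assumed value

	// Gossip backfill pauses once 32 messages have gone out since the last
	// pong; forwarded gossip is only dropped after 2x that, i.e. 64 messages.
	assert_eq!(BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO, 64);
}
```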
struct Peer {
InitSyncTracker::NodesSyncing(pk) => pk < node_id,
}
}
+
+ /// Returns whether we should be reading bytes from this peer, based on whether its outbound
+ /// buffer still has space and we don't need to pause reads to get some writes out.
+ fn should_read(&self) -> bool {
+ self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
+ }
+
+ /// Determines if we should push additional gossip messages onto a peer's outbound buffer for
+ /// backfilling gossip data to the peer. This is checked every time the peer's buffer may have
+ /// been drained.
+ fn should_buffer_gossip_backfill(&self) -> bool {
+ self.pending_outbound_buffer.is_empty() &&
+ self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+	/// Returns whether this peer's buffer is full and we should drop gossip messages.
+	fn buffer_full_drop_gossip(&self) -> bool {
+		self.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
+			|| self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
+	}
}
/// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
/// issues such as overly long function definitions.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<NetGraphMsgHandler<Arc<NetworkGraph>, Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;
+pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;
/// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
/// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
/// helps with issues such as long function definitions.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e NetGraphMsgHandler<&'g NetworkGraph, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;
+pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;
/// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshals
/// socket events into messages which it passes on to its [`MessageHandler`].
peer_counter: AtomicCounter,
logger: L,
+ secp_ctx: Secp256k1<secp256k1::SignOnly>
}
enum MessageHandlingError {
let mut ephemeral_key_midstate = Sha256::engine();
ephemeral_key_midstate.input(ephemeral_random_data);
+ let mut secp_ctx = Secp256k1::signing_only();
+ let ephemeral_hash = Sha256::from_engine(ephemeral_key_midstate.clone()).into_inner();
+ secp_ctx.seeded_randomize(&ephemeral_hash);
+
PeerManager {
message_handler,
peers: FairRwLock::new(HashMap::new()),
peer_counter: AtomicCounter::new(),
logger,
custom_message_handler,
+ secp_ctx,
}
}
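Putting the constructor together: a hedged construction sketch using this module's `ErroringMessageHandler`/`IgnoringMessageHandler`, a stub descriptor, and a no-op logger, following the parameter order `new` takes here (dummy key and entropy for illustration only; real nodes need secure randomness):

```rust
use std::sync::Arc;
use bitcoin::secp256k1::SecretKey;
use lightning::ln::peer_handler::{
	ErroringMessageHandler, IgnoringMessageHandler, MessageHandler, PeerManager, SocketDescriptor,
};
use lightning::util::logger::{Logger, Record};

#[derive(Clone, PartialEq, Eq, Hash)]
struct NullDescriptor(u64); // stub socket handle, illustration only
impl SocketDescriptor for NullDescriptor {
	fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize { data.len() }
	fn disconnect_socket(&mut self) {}
}

struct NullLogger;
impl Logger for NullLogger { fn log(&self, _record: &Record) {} }

fn main() {
	// Dummy node secret and ephemeral entropy, for illustration only.
	let node_secret = SecretKey::from_slice(&[0x42; 32]).unwrap();
	let ephemeral_bytes = [0x13; 32];

	let message_handler = MessageHandler {
		chan_handler: ErroringMessageHandler::new(),
		route_handler: IgnoringMessageHandler {},
	};
	// In practice the descriptor type comes from your I/O layer, as the
	// SimpleArc/SimpleRef aliases above suggest for the handler types.
	let _peer_manager: PeerManager<NullDescriptor, _, _, _, _> = PeerManager::new(
		message_handler, node_secret, &ephemeral_bytes, Arc::new(NullLogger),
		IgnoringMessageHandler {},
	);
}
```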
/// peer using the init message.
/// The user should pass the remote network address of the host they are connected to.
///
- /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new
- /// descriptor but must disconnect the connection immediately.
+ /// If an `Err` is returned here you must disconnect the connection immediately.
///
/// Returns a small number of bytes to send to the remote node (currently always 50).
///
/// [`socket_disconnected()`]: PeerManager::socket_disconnected
pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
- let res = peer_encryptor.get_act_one().to_vec();
+ let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
let mut peers = self.peers.write().unwrap();
/// The user should pass the remote network address of the host they are connected to.
///
/// May refuse the connection by returning an Err, but will never write bytes to the remote end
- /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
- /// call socket_disconnected for the new descriptor but must disconnect the connection
- /// immediately.
+ /// (outbound connector always speaks first). If an `Err` is returned here you must disconnect
+ /// the connection immediately.
///
/// Panics if descriptor is duplicative with some other descriptor which has not yet been
/// [`socket_disconnected()`].
///
/// [`socket_disconnected()`]: PeerManager::socket_disconnected
pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
- let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
+ let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret, &self.secp_ctx);
let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
let mut peers = self.peers.write().unwrap();
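To make the connection-driving side of the two methods above concrete, a blocking sketch of an outbound connect loop; real drivers are async and must also service `send_data` writes and pause/resume reads, and the `TcpStream` plumbing here is illustrative only:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;
use std::ops::Deref;
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
use lightning::util::logger::Logger;

fn connect_outbound<D, CM, RM, L, CMH>(
	pm: &PeerManager<D, CM, RM, L, CMH>, mut descriptor: D,
	their_node_id: PublicKey, mut socket: TcpStream,
) -> std::io::Result<()>
where
	D: SocketDescriptor,
	CM: Deref, CM::Target: ChannelMessageHandler,
	RM: Deref, RM::Target: RoutingMessageHandler,
	L: Deref, L::Target: Logger,
	CMH: Deref, CMH::Target: CustomMessageHandler,
{
	// On Err there is nothing to undo; just close the socket.
	let act_one = match pm.new_outbound_connection(their_node_id, descriptor.clone(), None) {
		Ok(bytes) => bytes,
		Err(_) => return Ok(()),
	};
	socket.write_all(&act_one)?; // currently always 50 bytes (Noise act one)

	let mut buf = [0u8; 4096];
	loop {
		let len = socket.read(&mut buf)?;
		if len == 0 {
			// Remote close: tell the PeerManager this descriptor is gone.
			pm.socket_disconnected(&descriptor);
			return Ok(());
		}
		// Ignoring the pause_read flag for brevity; see Peer::should_read above.
		if pm.read_event(&mut descriptor, &buf[..len]).is_err() {
			return Ok(()); // PeerManager gave up on this peer; close the socket.
		}
		pm.process_events(); // flushes responses via descriptor.send_data
	}
}
```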
fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
while !peer.awaiting_write_event {
- if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE && peer.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK {
+ if peer.should_buffer_gossip_backfill() {
match peer.sync_status {
InitSyncTracker::NoSyncRequested => {},
InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => {
- let steps = ((OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len() + 2) / 3) as u8;
- let all_messages = self.message_handler.route_handler.get_next_channel_announcements(c, steps);
- for &(ref announce, ref update_a_option, ref update_b_option) in all_messages.iter() {
- self.enqueue_message(peer, announce);
- if let &Some(ref update_a) = update_a_option {
- self.enqueue_message(peer, update_a);
+ if let Some((announce, update_a_option, update_b_option)) =
+ self.message_handler.route_handler.get_next_channel_announcement(c)
+ {
+ self.enqueue_message(peer, &announce);
+ if let Some(update_a) = update_a_option {
+ self.enqueue_message(peer, &update_a);
}
- if let &Some(ref update_b) = update_b_option {
- self.enqueue_message(peer, update_b);
+ if let Some(update_b) = update_b_option {
+ self.enqueue_message(peer, &update_b);
}
peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff);
}
},
InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => {
- let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
- let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps);
- for msg in all_messages.iter() {
- self.enqueue_message(peer, msg);
+ if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(None) {
+ self.enqueue_message(peer, &msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::NoSyncRequested;
}
},
InitSyncTracker::ChannelsSyncing(_) => unreachable!(),
InitSyncTracker::NodesSyncing(key) => {
- let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
- let all_messages = self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps);
- for msg in all_messages.iter() {
- self.enqueue_message(peer, msg);
+ if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(Some(&key)) {
+ self.enqueue_message(peer, &msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::NoSyncRequested;
}
},
self.maybe_send_extra_ping(peer);
}
- if {
- let next_buff = match peer.pending_outbound_buffer.front() {
- None => return,
- Some(buff) => buff,
- };
+ let next_buff = match peer.pending_outbound_buffer.front() {
+ None => return,
+ Some(buff) => buff,
+ };
- let should_be_reading = peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
- let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
- let data_sent = descriptor.send_data(pending, should_be_reading);
- peer.pending_outbound_buffer_first_msg_offset += data_sent;
- if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() { true } else { false }
- } {
+ let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
+ let data_sent = descriptor.send_data(pending, peer.should_read());
+ peer.pending_outbound_buffer_first_msg_offset += data_sent;
+ if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() {
peer.pending_outbound_buffer_first_msg_offset = 0;
peer.pending_outbound_buffer.pop_front();
} else {
let next_step = peer.channel_encryptor.get_noise_step();
match next_step {
NextNoiseStep::ActOne => {
- let act_two = try_potential_handleerror!(peer,
- peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], &self.our_node_secret, self.get_ephemeral_key())).to_vec();
+ let act_two = try_potential_handleerror!(peer, peer.channel_encryptor
+ .process_act_one_with_keys(&peer.pending_read_buffer[..],
+ &self.our_node_secret, self.get_ephemeral_key(), &self.secp_ctx)).to_vec();
peer.pending_outbound_buffer.push_back(act_two);
peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long
},
NextNoiseStep::ActTwo => {
let (act_three, their_node_id) = try_potential_handleerror!(peer,
- peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], &self.our_node_secret));
+ peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..],
+ &self.our_node_secret, &self.secp_ctx));
peer.pending_outbound_buffer.push_back(act_three.to_vec());
peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
peer.pending_read_is_header = true;
}
}
}
- pause_read = peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
+ pause_read = !peer.should_read();
if let Some(message) = msg_to_handle {
match self.handle_message(&peer_mutex, peer_lock, message) {
wire::Message::FundingSigned(msg) => {
self.message_handler.chan_handler.handle_funding_signed(&their_node_id, &msg);
},
- wire::Message::FundingLocked(msg) => {
- self.message_handler.chan_handler.handle_funding_locked(&their_node_id, &msg);
+ wire::Message::ChannelReady(msg) => {
+ self.message_handler.chan_handler.handle_channel_ready(&their_node_id, &msg);
},
wire::Message::Shutdown(msg) => {
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
- {
+ if peer.buffer_full_drop_gossip() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
!peer.should_forward_node_announcement(msg.contents.node_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
- {
+ if peer.buffer_full_drop_gossip() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
- {
+ if peer.buffer_full_drop_gossip() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
log_bytes!(msg.channel_id));
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
- MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
+ MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
use util::test_utils;
use bitcoin::secp256k1::Secp256k1;
- use bitcoin::secp256k1::key::{SecretKey, PublicKey};
+ use bitcoin::secp256k1::{SecretKey, PublicKey};
use prelude::*;
use sync::{Arc, Mutex};
peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
peer_a.process_events();
- assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
peer_b.process_events();
- assert_eq!(peer_a.read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+ let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_a.read_event(&mut fd_a, &b_data).unwrap(), false);
+
peer_a.process_events();
- assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
(fd_a.clone(), fd_b.clone())
}
// Check that each peer has received the expected number of channel updates and channel
// announcements.
- assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
- assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
- assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
- assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
+ assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+ assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
+ assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+ assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
}
#[test]
assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false);
peers[0].process_events();
- assert_eq!(peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
peers[1].process_events();
// ...but if we get a second timer tick, we should disconnect the peer
peers[0].timer_tick_occurred();
assert_eq!(peers[0].peers.read().unwrap().len(), 0);
- assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
+ let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+ assert!(peers[0].read_event(&mut fd_a, &b_data).is_err());
}
#[test]