//! Instead of actually servicing sockets ourselves we require that you implement the
//! SocketDescriptor interface and use that to receive actions which you should perform on the
//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
-//! call into the provided message handlers (probably a ChannelManager and NetGraphmsgHandler) with messages
-//! they should handle, and encoding/sending response messages.
-
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
-
-use ln::features::InitFeatures;
-use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LightningError, RoutingMessageHandler};
-use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
-use util::ser::{VecWriter, Writeable, Writer};
-use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
-use ln::wire;
-use util::atomic_counter::AtomicCounter;
-use util::events::{MessageSendEvent, MessageSendEventsProvider};
-use util::logger::Logger;
-use routing::network_graph::NetGraphMsgHandler;
-
-use prelude::*;
-use io;
+//! call into the provided message handlers (probably a ChannelManager and P2PGossipSync) with
+//! messages they should handle, and encoding/sending response messages.
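+//!
+//! As a rough sketch (not part of the original documentation; `peer_manager`, `descriptor`,
+//! `their_node_id` and `bytes_read` are assumed to come from your own networking code), the
+//! expected call pattern looks something like this:
+//!
+//! ```ignore
+//! // Outbound: register the connection, then write the returned handshake bytes to the socket.
+//! let initial_bytes = peer_manager.new_outbound_connection(their_node_id, descriptor.clone(), None)?;
+//! descriptor.send_data(&initial_bytes, true);
+//!
+//! // Whenever bytes arrive on the socket, feed them in and then drive any resulting work.
+//! // If `Ok(true)` is returned we should stop reading until a later `send_data` resumes reads;
+//! // on `Err` the connection should simply be closed.
+//! let pause_read = peer_manager.read_event(&mut descriptor, &bytes_read)?;
+//! peer_manager.process_events();
+//!
+//! // When a previously-full socket becomes writable again:
+//! peer_manager.write_buffer_space_avail(&mut descriptor)?;
+//! ```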
+
+use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
+
+use crate::ln::features::{InitFeatures, NodeFeatures};
+use crate::ln::msgs;
+use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
+use crate::util::ser::{VecWriter, Writeable, Writer};
+use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
+use crate::ln::wire;
+use crate::ln::wire::Encode;
+use crate::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
+use crate::routing::gossip::{NetworkGraph, P2PGossipSync};
+use crate::util::atomic_counter::AtomicCounter;
+use crate::util::crypto::sign;
+use crate::util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
+use crate::util::logger::Logger;
+
+use crate::prelude::*;
+use crate::io;
use alloc::collections::LinkedList;
-use sync::{Arc, Mutex};
+use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock};
+use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use core::{cmp, hash, fmt, mem};
use core::ops::Deref;
use core::convert::Infallible;
#[cfg(feature = "std")] use std::error;
use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
use bitcoin::hashes::{HashEngine, Hash};
fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
- fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
- Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
- fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
- fn sync_routing_table(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
+ fn get_next_channel_announcement(&self, _starting_point: u64) ->
+ Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
+ fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<msgs::NodeAnnouncement> { None }
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+ InitFeatures::empty()
+ }
+}
+impl OnionMessageProvider for IgnoringMessageHandler {
+ fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
+}
+impl OnionMessageHandler for IgnoringMessageHandler {
+ fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+ fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+ InitFeatures::empty()
+ }
+}
+impl CustomOnionMessageHandler for IgnoringMessageHandler {
+ type CustomMessage = Infallible;
+ fn handle_custom_message(&self, _msg: Infallible) {
+ // Since we always return `None` in the read, the handle method should never be called.
+ unreachable!();
+ }
+ fn read_custom_message<R: io::Read>(&self, _msg_type: u64, _buffer: &mut R) -> Result<Option<Infallible>, msgs::DecodeError> where Self: Sized {
+ Ok(None)
+ }
}
+
+impl CustomOnionMessageContents for Infallible {
+ fn tlv_type(&self) -> u64 { unreachable!(); }
+}
+
impl Deref for IgnoringMessageHandler {
type Target = IgnoringMessageHandler;
fn deref(&self) -> &Self { self }
fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
- fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+ fn handle_channel_ready(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReady) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_shutdown(&self, their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) {
// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
- fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {}
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+ // Set a number of features which various nodes may require to talk to us. It's totally
+ // reasonable to indicate we "support" all kinds of channel features...we just reject all
+ // channels.
+ let mut features = InitFeatures::empty();
+ features.set_data_loss_protect_optional();
+ features.set_upfront_shutdown_script_optional();
+ features.set_variable_length_onion_optional();
+ features.set_static_remote_key_optional();
+ features.set_payment_secret_optional();
+ features.set_basic_mpp_optional();
+ features.set_wumbo_optional();
+ features.set_shutdown_any_segwit_optional();
+ features.set_channel_type_optional();
+ features.set_scid_privacy_optional();
+ features.set_zero_conf_optional();
+ features
+ }
}
impl Deref for ErroringMessageHandler {
type Target = ErroringMessageHandler;
}
/// Provides references to trait impls which handle different types of messages.
-pub struct MessageHandler<CM: Deref, RM: Deref> where
+pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
CM::Target: ChannelMessageHandler,
- RM::Target: RoutingMessageHandler {
+ RM::Target: RoutingMessageHandler,
+ OM::Target: OnionMessageHandler,
+{
/// A message handler which handles messages specific to channels. Usually this is just a
/// [`ChannelManager`] object or an [`ErroringMessageHandler`].
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub chan_handler: CM,
/// A message handler which handles messages updating our knowledge of the network channel
- /// graph. Usually this is just a [`NetGraphMsgHandler`] object or an
- /// [`IgnoringMessageHandler`].
+ /// graph. Usually this is just a [`P2PGossipSync`] object or an [`IgnoringMessageHandler`].
///
- /// [`NetGraphMsgHandler`]: crate::routing::network_graph::NetGraphMsgHandler
+ /// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
pub route_handler: RM,
+
+ /// A message handler which handles onion messages. For now, this can only be an
+ /// [`IgnoringMessageHandler`].
+ pub onion_message_handler: OM,
}
/// Provides an object which can be used to send data to and which uniquely identifies a connection
/// descriptor.
#[derive(Clone)]
pub struct PeerHandleError {
- /// Used to indicate that we probably can't make any future connections to this peer, implying
- /// we should go ahead and force-close any channels we have with it.
+ /// Used to indicate that we probably can't make any future connections to this peer (e.g.
+ /// because we required features that our peer was missing, or vice versa).
+ ///
+ /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+ /// any channels with this peer or check for new versions of LDK.
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub no_connection_possible: bool,
}
impl fmt::Debug for PeerHandleError {
NodesSyncing(PublicKey),
}
+/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop
+/// forwarding gossip messages to peers altogether.
+const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
+
/// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
/// we have fewer than this many messages in the outbound buffer again.
/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
/// refilled as we send bytes.
-const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
+const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 12;
/// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
/// the peer.
-const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = 20;
+const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
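+// Note: with the constants above this currently works out to 12 * 2 = 24 buffered messages.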
+
+/// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
+/// the socket receive buffer before receiving the ping.
+///
+/// On a fairly old Arm64 board, with Linux defaults, this can take as long as 20 seconds, not
+/// including any network delays, outbound traffic, or the same for messages from other peers.
+///
+/// Thus, to avoid needlessly disconnecting a peer, we allow a peer to take this many timer ticks
+/// per connected peer to respond to a ping, as long as they send us at least one message during
+/// each tick, ensuring we aren't actually just disconnected.
+/// With a timer tick interval of ten seconds, this translates to about 40 seconds per connected
+/// peer.
+///
+/// When we improve parallelism somewhat we should reduce this to e.g. this many timer ticks per
+/// two connected peers, assuming most LDK-running systems have at least two cores.
+const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4;
+
+/// This is the minimum number of messages we expect a peer to be able to handle within one timer
+/// tick. Once we have sent this many messages since the last ping, we send a ping right away to
+/// ensure we don't just fill up our send buffer and leave the peer with too many messages to
+/// process before the next ping.
+///
+/// Note that we continue responding to other messages even after we've sent this many messages, so
+/// it's more of a general guideline used for gossip backfill (and gossip forwarding, times
+/// [`FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO`]) than a hard limit.
+const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
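+// Together with the ratio above, this means we stop queueing gossip backfill once 32 messages
+// have been sent since the last pong, and drop gossip broadcasts entirely once more than
+// 32 * 2 = 64 have (see `Peer::buffer_full_drop_gossip_broadcast` below).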
struct Peer {
channel_encryptor: PeerChannelEncryptor,
their_node_id: Option<PublicKey>,
their_features: Option<InitFeatures>,
+ their_net_address: Option<NetAddress>,
pending_outbound_buffer: LinkedList<Vec<u8>>,
pending_outbound_buffer_first_msg_offset: usize,
+ /// Queue gossip broadcasts separately from `pending_outbound_buffer` so we can easily
+ /// prioritize channel messages over them.
+ ///
+ /// Note that these messages are *not* encrypted/MAC'd, and are only serialized.
+ gossip_broadcast_buffer: LinkedList<Vec<u8>>,
awaiting_write_event: bool,
pending_read_buffer: Vec<u8>,
sync_status: InitSyncTracker,
- awaiting_pong: bool,
+ msgs_sent_since_pong: usize,
+ awaiting_pong_timer_tick_intervals: i8,
+ received_message_since_timer_tick: bool,
+ sent_gossip_timestamp_filter: bool,
}
impl Peer {
/// announcements/updates for the given channel_id then we will send it when we get to that
/// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
/// sent the old versions, we should send the update, and so return true here.
- fn should_forward_channel_announcement(&self, channel_id: u64)->bool{
+ fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
+ if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
+ !self.sent_gossip_timestamp_filter {
+ return false;
+ }
match self.sync_status {
InitSyncTracker::NoSyncRequested => true,
InitSyncTracker::ChannelsSyncing(i) => i < channel_id,
/// Similar to the above, but for node announcements indexed by node_id.
fn should_forward_node_announcement(&self, node_id: PublicKey) -> bool {
+ if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
+ !self.sent_gossip_timestamp_filter {
+ return false;
+ }
match self.sync_status {
InitSyncTracker::NoSyncRequested => true,
InitSyncTracker::ChannelsSyncing(_) => false,
InitSyncTracker::NodesSyncing(pk) => pk < node_id,
}
}
-}
-struct PeerHolder<Descriptor: SocketDescriptor> {
- peers: HashMap<Descriptor, Peer>,
- /// Only add to this set when noise completes:
- node_id_to_descriptor: HashMap<PublicKey, Descriptor>,
+ /// Returns whether we should be reading bytes from this peer, based on whether its outbound
+ /// buffer still has space and we don't need to pause reads to get some writes out.
+ fn should_read(&self) -> bool {
+ self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
+ }
+
+ /// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's
+ /// outbound buffer. This is checked every time the peer's buffer may have been drained.
+ fn should_buffer_gossip_backfill(&self) -> bool {
+ self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
+ && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+ /// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
+ /// every time the peer's buffer may have been drained.
+ fn should_buffer_onion_message(&self) -> bool {
+ self.pending_outbound_buffer.is_empty()
+ && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+ /// Determines if we should push additional gossip broadcast messages onto a peer's outbound
+ /// buffer. This is checked every time the peer's buffer may have been drained.
+ fn should_buffer_gossip_broadcast(&self) -> bool {
+ self.pending_outbound_buffer.is_empty()
+ && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+ /// Returns whether this peer's outbound buffers are full and we should drop gossip broadcasts.
+ fn buffer_full_drop_gossip_broadcast(&self) -> bool {
+ let total_outbound_buffered =
+ self.gossip_broadcast_buffer.len() + self.pending_outbound_buffer.len();
+
+ total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP ||
+ self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
+ }
}
/// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions.
-pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<NetGraphMsgHandler<Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;
+///
+/// (C-not exported) as `Arc`s don't make sense in bindings.
+pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, Arc<SimpleArcOnionMessenger<L>>, Arc<L>, IgnoringMessageHandler>;
/// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
/// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// helps with issues such as long function definitions.
-pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e NetGraphMsgHandler<&'g C, &'f L>, &'f L, IgnoringMessageHandler>;
+///
+/// (C-not exported) as general type aliases don't make sense in bindings.
+pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'i SimpleRefOnionMessenger<'j, 'k, L>, &'f L, IgnoringMessageHandler>;
/// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
/// socket events into messages which it passes on to its [`MessageHandler`].
/// you're using lightning-net-tokio.
///
/// [`read_event`]: PeerManager::read_event
-pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> where
+pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
+ OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler {
- message_handler: MessageHandler<CM, RM>,
- peers: Mutex<PeerHolder<Descriptor>>,
+ message_handler: MessageHandler<CM, RM, OM>,
+ /// Connection state for each connected peer - we have an outer read-write lock which is taken
+ /// as read while we're doing processing for a peer and taken write when a peer is being added
+ /// or removed.
+ ///
+ /// The inner Peer lock is held for sending and receiving bytes, but note that we do *not* hold
+ /// it while we're processing a message. This is fine as [`PeerManager::read_event`] requires
+ /// that there be no parallel calls for a given peer, so mutual exclusion of messages handed to
+ /// the `MessageHandler`s for a given peer is already guaranteed.
+ peers: FairRwLock<HashMap<Descriptor, Mutex<Peer>>>,
+ /// Only add to this set when noise completes.
+ /// Locked *after* peers. When an item is removed, it must be removed with the `peers` write
+ /// lock held. Entries may be added with only the `peers` read lock held (though the
+ /// `Descriptor` value must already exist in `peers`).
+ node_id_to_descriptor: Mutex<HashMap<PublicKey, Descriptor>>,
+ /// We can only have one thread processing events at once, but we don't usually need the full
+ /// `peers` write lock to do so, so instead we block on this empty mutex when entering
+ /// `process_events`.
+ event_processing_lock: Mutex<()>,
+ /// Because event processing is global and always does all available work before returning,
+ /// there is no reason for us to have many event processors waiting on the lock at once.
+ /// Instead, we limit the total blocked event processors to always exactly one by setting this
+ /// when an event process call is waiting.
+ blocked_event_processors: AtomicBool,
+
+ /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
+ /// value increases strictly since we don't assume access to a time source.
+ last_node_announcement_serial: AtomicU32,
+
our_node_secret: SecretKey,
ephemeral_key_midstate: Sha256Engine,
custom_message_handler: CMH,
peer_counter: AtomicCounter,
logger: L,
+ secp_ctx: Secp256k1<secp256k1::SignOnly>
}
enum MessageHandlingError {
}}
}
-impl<Descriptor: SocketDescriptor, CM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
+impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, OM, L, IgnoringMessageHandler> where
CM::Target: ChannelMessageHandler,
+ OM::Target: OnionMessageHandler,
L::Target: Logger {
- /// Constructs a new PeerManager with the given ChannelMessageHandler. No routing message
- /// handler is used and network graph messages are ignored.
+ /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and
+ /// `OnionMessageHandler`. No routing message handler is used and network graph messages are
+ /// ignored.
///
/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
+ /// `current_time` is used as an always-increasing counter that survives across restarts and is
+ /// incremented irregularly internally. In general it is best to simply use the current UNIX
+ /// timestamp, however if it is not available a persistent counter that increases once per
+ /// minute should suffice.
+ ///
/// (C-not exported) as we can't export a PeerManager with a dummy route handler
- pub fn new_channel_only(channel_message_handler: CM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
+ pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, our_node_secret: SecretKey, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
Self::new(MessageHandler {
chan_handler: channel_message_handler,
route_handler: IgnoringMessageHandler{},
- }, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
+ onion_message_handler,
+ }, our_node_secret, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{})
}
}
-impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, L, IgnoringMessageHandler> where
+impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
RM::Target: RoutingMessageHandler,
L::Target: Logger {
- /// Constructs a new PeerManager with the given RoutingMessageHandler. No channel message
- /// handler is used and messages related to channels will be ignored (or generate error
- /// messages). Note that some other lightning implementations time-out connections after some
- /// time if no channel is built with the peer.
+ /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message
+ /// handler or onion message handler is used and onion and channel messages will be ignored (or
+ /// generate error messages). Note that some other lightning implementations time-out connections
+ /// after some time if no channel is built with the peer.
+ ///
+ /// `current_time` is used as an always-increasing counter that survives across restarts and is
+ /// incremented irregularly internally. In general it is best to simply use the current UNIX
+ /// timestamp, however if it is not available a persistent counter that increases once per
+ /// minute should suffice.
///
/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// (C-not exported) as we can't export a PeerManager with a dummy channel handler
- pub fn new_routing_only(routing_message_handler: RM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
+ pub fn new_routing_only(routing_message_handler: RM, our_node_secret: SecretKey, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
Self::new(MessageHandler {
chan_handler: ErroringMessageHandler::new(),
route_handler: routing_message_handler,
- }, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
+ onion_message_handler: IgnoringMessageHandler{},
+ }, our_node_secret, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{})
}
}
-impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, L, CMH> where
+/// A simple wrapper that optionally prints " from <pubkey>" for an optional pubkey.
+/// This works around `format!()` taking a reference to each argument, preventing
+/// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling
+/// due to lifetime errors.
+struct OptionalFromDebugger<'a>(&'a Option<PublicKey>);
+impl core::fmt::Display for OptionalFromDebugger<'_> {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+ if let Some(node_id) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) }
+ }
+}
+
+/// A function used to filter out local or private addresses
+/// https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xhtml
+/// https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
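+///
+/// For example (illustrative only): a peer's address of `10.0.0.5` or `192.168.1.10` is dropped
+/// (maps to `None`), while a routable IPv4 address such as `8.8.8.8`, an IPv6 address within
+/// `2000::/3`, or an onion address is passed through unchanged.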
+fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
+ match ip_address{
+ // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
+ Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
+ // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
+ Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
+ // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
+ Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
+ // For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8)
+ Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
+ // For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16)
+ Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
+ // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
+ Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
+ // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
+ Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
+ // For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24)
+ Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
+ // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
+ Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
+ // For remaining addresses
+ Some(NetAddress::IPv6{addr: _, port: _}) => None,
+ Some(..) => ip_address,
+ None => None,
+ }
+}
+
+impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
+ OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler {
/// Constructs a new PeerManager with the given message handlers and node_id secret key
/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
- pub fn new(message_handler: MessageHandler<CM, RM>, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self {
+ ///
+ /// `current_time` is used as an always-increasing counter that survives across restarts and is
+ /// incremented irregularly internally. In general it is best to simply use the current UNIX
+ /// timestamp, however if it is not available a persistent counter that increases once per
+ /// minute should suffice.
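+ ///
+ /// As a rough sketch (with `node_secret`, `current_unix_time`, `ephemeral_bytes` and `logger`
+ /// assumed to be provided by the application), a `PeerManager` which rejects channel messages
+ /// and ignores gossip and onion messages could be built as:
+ ///
+ /// ```ignore
+ /// let handler = MessageHandler {
+ ///     chan_handler: ErroringMessageHandler::new(),
+ ///     route_handler: IgnoringMessageHandler{},
+ ///     onion_message_handler: IgnoringMessageHandler{},
+ /// };
+ /// // The `SocketDescriptor` type parameter is pinned down by later calls such as
+ /// // `new_outbound_connection`.
+ /// let peer_manager = PeerManager::new(handler, node_secret, current_unix_time, &ephemeral_bytes, logger, IgnoringMessageHandler{});
+ /// ```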
+ pub fn new(message_handler: MessageHandler<CM, RM, OM>, our_node_secret: SecretKey, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self {
let mut ephemeral_key_midstate = Sha256::engine();
ephemeral_key_midstate.input(ephemeral_random_data);
+ let mut secp_ctx = Secp256k1::signing_only();
+ let ephemeral_hash = Sha256::from_engine(ephemeral_key_midstate.clone()).into_inner();
+ secp_ctx.seeded_randomize(&ephemeral_hash);
+
PeerManager {
message_handler,
- peers: Mutex::new(PeerHolder {
- peers: HashMap::new(),
- node_id_to_descriptor: HashMap::new()
- }),
+ peers: FairRwLock::new(HashMap::new()),
+ node_id_to_descriptor: Mutex::new(HashMap::new()),
+ event_processing_lock: Mutex::new(()),
+ blocked_event_processors: AtomicBool::new(false),
our_node_secret,
ephemeral_key_midstate,
peer_counter: AtomicCounter::new(),
+ last_node_announcement_serial: AtomicU32::new(current_time),
logger,
custom_message_handler,
+ secp_ctx,
}
}
/// new_outbound_connection, however entries will only appear once the initial handshake has
/// completed and we are sure the remote peer has the private key for the given node_id.
pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
- let peers = self.peers.lock().unwrap();
- peers.peers.values().filter_map(|p| {
+ let peers = self.peers.read().unwrap();
+ peers.values().filter_map(|peer_mutex| {
+ let p = peer_mutex.lock().unwrap();
if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() {
return None;
}
SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
}
- /// Indicates a new outbound connection has been established to a node with the given node_id.
- /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new
- /// descriptor but must disconnect the connection immediately.
+ /// Indicates a new outbound connection has been established to a node with the given node_id
+ /// and an optional remote network address.
+ ///
+ /// The remote network address adds the option to report a remote IP address back to a connecting
+ /// peer using the init message.
+ /// The user should pass the remote network address of the host they are connected to.
+ ///
+ /// If an `Err` is returned here you must disconnect the connection immediately.
///
/// Returns a small number of bytes to send to the remote node (currently always 50).
///
/// [`socket_disconnected()`].
///
/// [`socket_disconnected()`]: PeerManager::socket_disconnected
- pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
+ pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
- let res = peer_encryptor.get_act_one().to_vec();
+ let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
- let mut peers = self.peers.lock().unwrap();
- if peers.peers.insert(descriptor, Peer {
+ let mut peers = self.peers.write().unwrap();
+ if peers.insert(descriptor, Mutex::new(Peer {
channel_encryptor: peer_encryptor,
their_node_id: None,
their_features: None,
+ their_net_address: remote_network_address,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
+ gossip_broadcast_buffer: LinkedList::new(),
awaiting_write_event: false,
pending_read_buffer,
sync_status: InitSyncTracker::NoSyncRequested,
- awaiting_pong: false,
- }).is_some() {
+ msgs_sent_since_pong: 0,
+ awaiting_pong_timer_tick_intervals: 0,
+ received_message_since_timer_tick: false,
+ sent_gossip_timestamp_filter: false,
+ })).is_some() {
panic!("PeerManager driver duplicated descriptors!");
};
Ok(res)
}
- /// Indicates a new inbound connection has been established.
+ /// Indicates a new inbound connection has been established to a node with an optional remote
+ /// network address.
+ ///
+ /// The remote network address adds the option to report a remote IP address back to a connecting
+ /// peer using the init message.
+ /// The user should pass the remote network address of the host they are connected to.
///
/// May refuse the connection by returning an Err, but will never write bytes to the remote end
- /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
- /// call socket_disconnected for the new descriptor but must disconnect the connection
- /// immediately.
+ /// (outbound connector always speaks first). If an `Err` is returned here you must disconnect
+ /// the connection immediately.
///
/// Panics if descriptor is duplicative with some other descriptor which has not yet been
/// [`socket_disconnected()`].
///
/// [`socket_disconnected()`]: PeerManager::socket_disconnected
- pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
- let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
+ pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
+ let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret, &self.secp_ctx);
let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
- let mut peers = self.peers.lock().unwrap();
- if peers.peers.insert(descriptor, Peer {
+ let mut peers = self.peers.write().unwrap();
+ if peers.insert(descriptor, Mutex::new(Peer {
channel_encryptor: peer_encryptor,
their_node_id: None,
their_features: None,
+ their_net_address: remote_network_address,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
+ gossip_broadcast_buffer: LinkedList::new(),
awaiting_write_event: false,
pending_read_buffer,
sync_status: InitSyncTracker::NoSyncRequested,
- awaiting_pong: false,
- }).is_some() {
+ msgs_sent_since_pong: 0,
+ awaiting_pong_timer_tick_intervals: 0,
+ received_message_since_timer_tick: false,
+ sent_gossip_timestamp_filter: false,
+ })).is_some() {
panic!("PeerManager driver duplicated descriptors!");
};
Ok(())
fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
while !peer.awaiting_write_event {
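+ // Refill the outbound buffer in rough priority order: a pending onion message first, then one
+ // queued gossip broadcast, then gossip backfill for peers doing an initial sync.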
- if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE {
+ if peer.should_buffer_onion_message() {
+ if let Some(peer_node_id) = peer.their_node_id {
+ if let Some(next_onion_message) =
+ self.message_handler.onion_message_handler.next_onion_message_for_peer(peer_node_id) {
+ self.enqueue_message(peer, &next_onion_message);
+ }
+ }
+ }
+ if peer.should_buffer_gossip_broadcast() {
+ if let Some(msg) = peer.gossip_broadcast_buffer.pop_front() {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_buffer(&msg[..]));
+ }
+ }
+ if peer.should_buffer_gossip_backfill() {
match peer.sync_status {
InitSyncTracker::NoSyncRequested => {},
InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => {
- let steps = ((OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len() + 2) / 3) as u8;
- let all_messages = self.message_handler.route_handler.get_next_channel_announcements(c, steps);
- for &(ref announce, ref update_a_option, ref update_b_option) in all_messages.iter() {
- self.enqueue_message(peer, announce);
- if let &Some(ref update_a) = update_a_option {
- self.enqueue_message(peer, update_a);
+ if let Some((announce, update_a_option, update_b_option)) =
+ self.message_handler.route_handler.get_next_channel_announcement(c)
+ {
+ self.enqueue_message(peer, &announce);
+ if let Some(update_a) = update_a_option {
+ self.enqueue_message(peer, &update_a);
}
- if let &Some(ref update_b) = update_b_option {
- self.enqueue_message(peer, update_b);
+ if let Some(update_b) = update_b_option {
+ self.enqueue_message(peer, &update_b);
}
peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff);
}
},
InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => {
- let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
- let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps);
- for msg in all_messages.iter() {
- self.enqueue_message(peer, msg);
+ if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(None) {
+ self.enqueue_message(peer, &msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::NoSyncRequested;
}
},
InitSyncTracker::ChannelsSyncing(_) => unreachable!(),
InitSyncTracker::NodesSyncing(key) => {
- let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
- let all_messages = self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps);
- for msg in all_messages.iter() {
- self.enqueue_message(peer, msg);
+ if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(Some(&key)) {
+ self.enqueue_message(peer, &msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::NoSyncRequested;
}
},
}
}
+ if peer.msgs_sent_since_pong >= BUFFER_DRAIN_MSGS_PER_TICK {
+ self.maybe_send_extra_ping(peer);
+ }
- if {
- let next_buff = match peer.pending_outbound_buffer.front() {
- None => return,
- Some(buff) => buff,
- };
+ let next_buff = match peer.pending_outbound_buffer.front() {
+ None => return,
+ Some(buff) => buff,
+ };
- let should_be_reading = peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
- let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
- let data_sent = descriptor.send_data(pending, should_be_reading);
- peer.pending_outbound_buffer_first_msg_offset += data_sent;
- if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() { true } else { false }
- } {
+ let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
+ let data_sent = descriptor.send_data(pending, peer.should_read());
+ peer.pending_outbound_buffer_first_msg_offset += data_sent;
+ if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() {
peer.pending_outbound_buffer_first_msg_offset = 0;
peer.pending_outbound_buffer.pop_front();
} else {
/// [`send_data`]: SocketDescriptor::send_data
/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
- let mut peers = self.peers.lock().unwrap();
- match peers.peers.get_mut(descriptor) {
+ let peers = self.peers.read().unwrap();
+ match peers.get(descriptor) {
None => {
// This is most likely a simple race condition where the user found that the socket
// was writeable, then we told the user to `disconnect_socket()`, then they called
// this method. Return an error to make sure we get disconnected.
return Err(PeerHandleError { no_connection_possible: false });
},
- Some(peer) => {
+ Some(peer_mutex) => {
+ let mut peer = peer_mutex.lock().unwrap();
peer.awaiting_write_event = false;
- self.do_attempt_write_data(descriptor, peer);
+ self.do_attempt_write_data(descriptor, &mut peer);
}
};
Ok(())
}
}
- /// Append a message to a peer's pending outbound/write buffer, and update the map of peers needing sends accordingly.
+ /// Append a message to a peer's pending outbound/write buffer
fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
- let mut buffer = VecWriter(Vec::with_capacity(2048));
- wire::write(message, &mut buffer).unwrap(); // crash if the write failed
- let encoded_message = buffer.0;
+ if is_gossip_msg(message.type_id()) {
+ log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+ } else {
+ log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()))
+ }
+ peer.msgs_sent_since_pong += 1;
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(message));
+ }
- log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()));
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..]));
+ /// Append a message to a peer's pending outbound/write gossip broadcast buffer
+ fn enqueue_encoded_gossip_broadcast(&self, peer: &mut Peer, encoded_message: Vec<u8>) {
+ peer.msgs_sent_since_pong += 1;
+ peer.gossip_broadcast_buffer.push_back(encoded_message);
}
fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result<bool, PeerHandleError> {
- let pause_read = {
- let mut peers_lock = self.peers.lock().unwrap();
- let peers = &mut *peers_lock;
- let mut msgs_to_forward = Vec::new();
- let mut peer_node_id = None;
- let pause_read = match peers.peers.get_mut(peer_descriptor) {
- None => {
- // This is most likely a simple race condition where the user read some bytes
- // from the socket, then we told the user to `disconnect_socket()`, then they
- // called this method. Return an error to make sure we get disconnected.
- return Err(PeerHandleError { no_connection_possible: false });
- },
- Some(peer) => {
+ let mut pause_read = false;
+ let peers = self.peers.read().unwrap();
+ let mut msgs_to_forward = Vec::new();
+ let mut peer_node_id = None;
+ match peers.get(peer_descriptor) {
+ None => {
+ // This is most likely a simple race condition where the user read some bytes
+ // from the socket, then we told the user to `disconnect_socket()`, then they
+ // called this method. Return an error to make sure we get disconnected.
+ return Err(PeerHandleError { no_connection_possible: false });
+ },
+ Some(peer_mutex) => {
+ let mut read_pos = 0;
+ while read_pos < data.len() {
+ macro_rules! try_potential_handleerror {
+ ($peer: expr, $thing: expr) => {
+ match $thing {
+ Ok(x) => x,
+ Err(e) => {
+ match e.action {
+ msgs::ErrorAction::DisconnectPeer { msg: _ } => {
+ //TODO: Try to push msg
+ log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ return Err(PeerHandleError{ no_connection_possible: false });
+ },
+ msgs::ErrorAction::IgnoreAndLog(level) => {
+ log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ continue
+ },
+ msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
+ msgs::ErrorAction::IgnoreError => {
+ log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ continue;
+ },
+ msgs::ErrorAction::SendErrorMessage { msg } => {
+ log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ self.enqueue_message($peer, &msg);
+ continue;
+ },
+ msgs::ErrorAction::SendWarningMessage { msg, log_level } => {
+ log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ self.enqueue_message($peer, &msg);
+ continue;
+ },
+ }
+ }
+ }
+ }
+ }
+
+ let mut peer_lock = peer_mutex.lock().unwrap();
+ let peer = &mut *peer_lock;
+ let mut msg_to_handle = None;
+ if peer_node_id.is_none() {
+ peer_node_id = peer.their_node_id.clone();
+ }
+
assert!(peer.pending_read_buffer.len() > 0);
assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos);
- let mut read_pos = 0;
- while read_pos < data.len() {
- {
- let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos);
- peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]);
- read_pos += data_to_copy;
- peer.pending_read_buffer_pos += data_to_copy;
+ {
+ let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos);
+ peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]);
+ read_pos += data_to_copy;
+ peer.pending_read_buffer_pos += data_to_copy;
+ }
+
+ if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() {
+ peer.pending_read_buffer_pos = 0;
+
+ macro_rules! insert_node_id {
+ () => {
+ match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap()) {
+ hash_map::Entry::Occupied(_) => {
+ log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap()));
+ peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
+ return Err(PeerHandleError{ no_connection_possible: false })
+ },
+ hash_map::Entry::Vacant(entry) => {
+ log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
+ entry.insert(peer_descriptor.clone())
+ },
+ };
+ }
}
- if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() {
- peer.pending_read_buffer_pos = 0;
+ let next_step = peer.channel_encryptor.get_noise_step();
+ match next_step {
+ NextNoiseStep::ActOne => {
+ let act_two = try_potential_handleerror!(peer, peer.channel_encryptor
+ .process_act_one_with_keys(&peer.pending_read_buffer[..],
+ &self.our_node_secret, self.get_ephemeral_key(), &self.secp_ctx)).to_vec();
+ peer.pending_outbound_buffer.push_back(act_two);
+ peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long
+ },
+ NextNoiseStep::ActTwo => {
+ let (act_three, their_node_id) = try_potential_handleerror!(peer,
+ peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..],
+ &self.our_node_secret, &self.secp_ctx));
+ peer.pending_outbound_buffer.push_back(act_three.to_vec());
+ peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
+ peer.pending_read_is_header = true;
+
+ peer.their_node_id = Some(their_node_id);
+ insert_node_id!();
+ let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
+ .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
+ .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+ let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+ self.enqueue_message(peer, &resp);
+ peer.awaiting_pong_timer_tick_intervals = 0;
+ },
+ NextNoiseStep::ActThree => {
+ let their_node_id = try_potential_handleerror!(peer,
+ peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
+ peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
+ peer.pending_read_is_header = true;
+ peer.their_node_id = Some(their_node_id);
+ insert_node_id!();
+ let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
+ .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
+ .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+ let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+ self.enqueue_message(peer, &resp);
+ peer.awaiting_pong_timer_tick_intervals = 0;
+ },
+ NextNoiseStep::NoiseComplete => {
+ if peer.pending_read_is_header {
+ let msg_len = try_potential_handleerror!(peer,
+ peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]));
+ if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
+ peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
+ if msg_len < 2 { // Need at least the message type tag
+ return Err(PeerHandleError{ no_connection_possible: false });
+ }
+ peer.pending_read_is_header = false;
+ } else {
+ let msg_data = try_potential_handleerror!(peer,
+ peer.channel_encryptor.decrypt_message(&peer.pending_read_buffer[..]));
+ assert!(msg_data.len() >= 2);
+
+ // Reset read buffer
+ if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
+ peer.pending_read_buffer.resize(18, 0);
+ peer.pending_read_is_header = true;
- macro_rules! try_potential_handleerror {
- ($thing: expr) => {
- match $thing {
+ let mut reader = io::Cursor::new(&msg_data[..]);
+ let message_result = wire::read(&mut reader, &*self.custom_message_handler);
+ let message = match message_result {
Ok(x) => x,
Err(e) => {
- match e.action {
- msgs::ErrorAction::DisconnectPeer { msg: _ } => {
- //TODO: Try to push msg
- log_debug!(self.logger, "Error handling message; disconnecting peer with: {}", e.err);
- return Err(PeerHandleError{ no_connection_possible: false });
- },
- msgs::ErrorAction::IgnoreAndLog(level) => {
- log_given_level!(self.logger, level, "Error handling message; ignoring: {}", e.err);
- continue
- },
- msgs::ErrorAction::IgnoreError => {
- log_debug!(self.logger, "Error handling message; ignoring: {}", e.err);
+ match e {
+ // Note that to avoid recursion we never call
+ // `do_attempt_write_data` from here, causing
+ // the messages enqueued here to not actually
+ // be sent before the peer is disconnected.
+ (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => {
+ log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
continue;
- },
- msgs::ErrorAction::SendErrorMessage { msg } => {
- log_debug!(self.logger, "Error handling message; sending error message with: {}", e.err);
- self.enqueue_message(peer, &msg);
+ }
+ (msgs::DecodeError::UnsupportedCompression, _) => {
+ log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
+ self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() });
continue;
- },
+ }
+ (_, Some(ty)) if is_gossip_msg(ty) => {
+ log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
+ self.enqueue_message(peer, &msgs::WarningMessage {
+ channel_id: [0; 32],
+ data: format!("Unreadable/bogus gossip message of type {}", ty),
+ });
+ continue;
+ }
+ (msgs::DecodeError::UnknownRequiredFeature, ty) => {
+ log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
+ self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
+ return Err(PeerHandleError { no_connection_possible: false });
+ }
+ (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+ (msgs::DecodeError::InvalidValue, _) => {
+ log_debug!(self.logger, "Got an invalid value while deserializing message");
+ return Err(PeerHandleError { no_connection_possible: false });
+ }
+ (msgs::DecodeError::ShortRead, _) => {
+ log_debug!(self.logger, "Deserialization failed due to shortness of message");
+ return Err(PeerHandleError { no_connection_possible: false });
+ }
+ (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
+ (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
}
}
- }
- }
- }
-
- macro_rules! insert_node_id {
- () => {
- match peers.node_id_to_descriptor.entry(peer.their_node_id.unwrap()) {
- hash_map::Entry::Occupied(_) => {
- log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap()));
- peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
- return Err(PeerHandleError{ no_connection_possible: false })
- },
- hash_map::Entry::Vacant(entry) => {
- log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
- entry.insert(peer_descriptor.clone())
- },
};
- }
- }
-
- let next_step = peer.channel_encryptor.get_noise_step();
- match next_step {
- NextNoiseStep::ActOne => {
- let act_two = try_potential_handleerror!(peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], &self.our_node_secret, self.get_ephemeral_key())).to_vec();
- peer.pending_outbound_buffer.push_back(act_two);
- peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long
- },
- NextNoiseStep::ActTwo => {
- let (act_three, their_node_id) = try_potential_handleerror!(peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], &self.our_node_secret));
- peer.pending_outbound_buffer.push_back(act_three.to_vec());
- peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
- peer.pending_read_is_header = true;
- peer.their_node_id = Some(their_node_id);
- insert_node_id!();
- let features = InitFeatures::known();
- let resp = msgs::Init { features };
- self.enqueue_message(peer, &resp);
- },
- NextNoiseStep::ActThree => {
- let their_node_id = try_potential_handleerror!(peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
- peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
- peer.pending_read_is_header = true;
- peer.their_node_id = Some(their_node_id);
- insert_node_id!();
- let features = InitFeatures::known();
- let resp = msgs::Init { features };
- self.enqueue_message(peer, &resp);
- },
- NextNoiseStep::NoiseComplete => {
- if peer.pending_read_is_header {
- let msg_len = try_potential_handleerror!(peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]));
- peer.pending_read_buffer = Vec::with_capacity(msg_len as usize + 16);
- peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
- if msg_len < 2 { // Need at least the message type tag
- return Err(PeerHandleError{ no_connection_possible: false });
- }
- peer.pending_read_is_header = false;
- } else {
- let msg_data = try_potential_handleerror!(peer.channel_encryptor.decrypt_message(&peer.pending_read_buffer[..]));
- assert!(msg_data.len() >= 2);
-
- // Reset read buffer
- peer.pending_read_buffer = [0; 18].to_vec();
- peer.pending_read_is_header = true;
-
- let mut reader = io::Cursor::new(&msg_data[..]);
- let message_result = wire::read(&mut reader, &*self.custom_message_handler);
- let message = match message_result {
- Ok(x) => x,
- Err(e) => {
- match e {
- msgs::DecodeError::UnknownVersion => return Err(PeerHandleError { no_connection_possible: false }),
- msgs::DecodeError::UnknownRequiredFeature => {
- log_trace!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!");
- continue;
- }
- msgs::DecodeError::InvalidValue => {
- log_debug!(self.logger, "Got an invalid value while deserializing message");
- return Err(PeerHandleError { no_connection_possible: false });
- }
- msgs::DecodeError::ShortRead => {
- log_debug!(self.logger, "Deserialization failed due to shortness of message");
- return Err(PeerHandleError { no_connection_possible: false });
- }
- msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError { no_connection_possible: false }),
- msgs::DecodeError::Io(_) => return Err(PeerHandleError { no_connection_possible: false }),
- msgs::DecodeError::UnsupportedCompression => {
- log_trace!(self.logger, "We don't support zlib-compressed message fields, ignoring message");
- continue;
- }
- }
- }
- };
-
- match self.handle_message(peer, message) {
- Err(handling_error) => match handling_error {
- MessageHandlingError::PeerHandleError(e) => { return Err(e) },
- MessageHandlingError::LightningError(e) => {
- try_potential_handleerror!(Err(e));
- },
- },
- Ok(Some(msg)) => {
- peer_node_id = Some(peer.their_node_id.expect("After noise is complete, their_node_id is always set"));
- msgs_to_forward.push(msg);
- },
- Ok(None) => {},
- }
- }
+ msg_to_handle = Some(message);
}
}
}
}
-
- peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE // pause_read
+ pause_read = !peer.should_read();
+
+ if let Some(message) = msg_to_handle {
+ match self.handle_message(&peer_mutex, peer_lock, message) {
+ Err(handling_error) => match handling_error {
+ MessageHandlingError::PeerHandleError(e) => { return Err(e) },
+ MessageHandlingError::LightningError(e) => {
+ try_potential_handleerror!(&mut peer_mutex.lock().unwrap(), Err(e));
+ },
+ },
+ Ok(Some(msg)) => {
+ msgs_to_forward.push(msg);
+ },
+ Ok(None) => {},
+ }
+ }
}
- };
-
- for msg in msgs_to_forward.drain(..) {
- self.forward_broadcast_msg(peers, &msg, peer_node_id.as_ref());
}
+ }
- pause_read
- };
+ for msg in msgs_to_forward.drain(..) {
+ self.forward_broadcast_msg(&*peers, &msg, peer_node_id.as_ref());
+ }
Ok(pause_read)
}
/// Returns the message back if it needs to be broadcasted to all other peers.
fn handle_message(
&self,
- peer: &mut Peer,
+ peer_mutex: &Mutex<Peer>,
+ mut peer_lock: MutexGuard<Peer>,
message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
- log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+ let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages");
+ peer_lock.received_message_since_timer_tick = true;
// Need an Init as first message
- if let wire::Message::Init(_) = message {
- } else if peer.their_features.is_none() {
- log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap()));
+ if let wire::Message::Init(msg) = message {
+ if msg.features.requires_unknown_bits() {
+ log_debug!(self.logger, "Peer features required unknown version bits");
+ return Err(PeerHandleError{ no_connection_possible: true }.into());
+ }
+ if peer_lock.their_features.is_some() {
+ return Err(PeerHandleError{ no_connection_possible: false }.into());
+ }
+
+ log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
+
+ // For peers not supporting gossip queries, start sync now; otherwise wait until we receive a filter.
+ if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() {
+ peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
+ }
+
+ if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg) {
+ log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+ return Err(PeerHandleError{ no_connection_possible: true }.into());
+ }
+ if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg) {
+ log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+ return Err(PeerHandleError{ no_connection_possible: true }.into());
+ }
+ if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg) {
+ log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+ return Err(PeerHandleError{ no_connection_possible: true }.into());
+ }
+
+ peer_lock.their_features = Some(msg.features);
+ return Ok(None);
+ } else if peer_lock.their_features.is_none() {
+ log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
return Err(PeerHandleError{ no_connection_possible: false }.into());
}
+ if let wire::Message::GossipTimestampFilter(_msg) = message {
+ // When supporting gossip messages, start initial gossip sync only after we receive
+ // a GossipTimestampFilter
+ if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() &&
+ !peer_lock.sent_gossip_timestamp_filter {
+ peer_lock.sent_gossip_timestamp_filter = true;
+ peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
+ }
+ return Ok(None);
+ }
+
+ let their_features = peer_lock.their_features.clone();
+ mem::drop(peer_lock);
+
+ if is_gossip_msg(message.type_id()) {
+ log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+ } else {
+ log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+ }
+
let mut should_forward = None;
match message {
// Setup and Control messages:
- wire::Message::Init(msg) => {
- if msg.features.requires_unknown_bits() {
- log_debug!(self.logger, "Peer features required unknown version bits");
- return Err(PeerHandleError{ no_connection_possible: true }.into());
- }
- if peer.their_features.is_some() {
- return Err(PeerHandleError{ no_connection_possible: false }.into());
+ wire::Message::Init(_) => {
+ // Handled above
+ },
+ wire::Message::GossipTimestampFilter(_) => {
+ // Handled above
+ },
+ wire::Message::Error(msg) => {
+ let mut data_is_printable = true;
+ for b in msg.data.bytes() {
+ if b < 32 || b > 126 {
+ data_is_printable = false;
+ break;
+ }
}
- log_info!(self.logger, "Received peer Init message: {}", msg.features);
-
- if msg.features.initial_routing_sync() {
- peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
+ if data_is_printable {
+ log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), msg.data);
+ } else {
+ log_debug!(self.logger, "Got Err message from {} with non-ASCII error message", log_pubkey!(their_node_id));
}
- if !msg.features.supports_static_remote_key() {
- log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(peer.their_node_id.unwrap()));
+ self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
+ if msg.channel_id == [0; 32] {
return Err(PeerHandleError{ no_connection_possible: true }.into());
}
-
- self.message_handler.route_handler.sync_routing_table(&peer.their_node_id.unwrap(), &msg);
-
- self.message_handler.chan_handler.peer_connected(&peer.their_node_id.unwrap(), &msg);
- peer.their_features = Some(msg.features);
},
- wire::Message::Error(msg) => {
+ wire::Message::Warning(msg) => {
let mut data_is_printable = true;
for b in msg.data.bytes() {
if b < 32 || b > 126 {
}
if data_is_printable {
- log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data);
+ log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), msg.data);
} else {
- log_debug!(self.logger, "Got Err message from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap()));
- }
- self.message_handler.chan_handler.handle_error(&peer.their_node_id.unwrap(), &msg);
- if msg.channel_id == [0; 32] {
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ log_debug!(self.logger, "Got warning message from {} with non-ASCII error message", log_pubkey!(their_node_id));
}
},
wire::Message::Ping(msg) => {
if msg.ponglen < 65532 {
let resp = msgs::Pong { byteslen: msg.ponglen };
- self.enqueue_message(peer, &resp);
+ self.enqueue_message(&mut *peer_mutex.lock().unwrap(), &resp);
}
},
wire::Message::Pong(_msg) => {
- peer.awaiting_pong = false;
+ let mut peer_lock = peer_mutex.lock().unwrap();
+ peer_lock.awaiting_pong_timer_tick_intervals = 0;
+ peer_lock.msgs_sent_since_pong = 0;
},
// Channel messages:
wire::Message::OpenChannel(msg) => {
- self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), peer.their_features.clone().unwrap(), &msg);
+ self.message_handler.chan_handler.handle_open_channel(&their_node_id, their_features.clone().unwrap(), &msg);
},
wire::Message::AcceptChannel(msg) => {
- self.message_handler.chan_handler.handle_accept_channel(&peer.their_node_id.unwrap(), peer.their_features.clone().unwrap(), &msg);
+ self.message_handler.chan_handler.handle_accept_channel(&their_node_id, their_features.clone().unwrap(), &msg);
},
wire::Message::FundingCreated(msg) => {
- self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_funding_created(&their_node_id, &msg);
},
wire::Message::FundingSigned(msg) => {
- self.message_handler.chan_handler.handle_funding_signed(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_funding_signed(&their_node_id, &msg);
},
- wire::Message::FundingLocked(msg) => {
- self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg);
+ wire::Message::ChannelReady(msg) => {
+ self.message_handler.chan_handler.handle_channel_ready(&their_node_id, &msg);
},
wire::Message::Shutdown(msg) => {
- self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), peer.their_features.as_ref().unwrap(), &msg);
+ self.message_handler.chan_handler.handle_shutdown(&their_node_id, their_features.as_ref().unwrap(), &msg);
},
wire::Message::ClosingSigned(msg) => {
- self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_closing_signed(&their_node_id, &msg);
},
// Commitment messages:
wire::Message::UpdateAddHTLC(msg) => {
- self.message_handler.chan_handler.handle_update_add_htlc(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_update_add_htlc(&their_node_id, &msg);
},
wire::Message::UpdateFulfillHTLC(msg) => {
- self.message_handler.chan_handler.handle_update_fulfill_htlc(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_update_fulfill_htlc(&their_node_id, &msg);
},
wire::Message::UpdateFailHTLC(msg) => {
- self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_update_fail_htlc(&their_node_id, &msg);
},
wire::Message::UpdateFailMalformedHTLC(msg) => {
- self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&their_node_id, &msg);
},
wire::Message::CommitmentSigned(msg) => {
- self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_commitment_signed(&their_node_id, &msg);
},
wire::Message::RevokeAndACK(msg) => {
- self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_revoke_and_ack(&their_node_id, &msg);
},
wire::Message::UpdateFee(msg) => {
- self.message_handler.chan_handler.handle_update_fee(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_update_fee(&their_node_id, &msg);
},
wire::Message::ChannelReestablish(msg) => {
- self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_channel_reestablish(&their_node_id, &msg);
},
// Routing messages:
wire::Message::AnnouncementSignatures(msg) => {
- self.message_handler.chan_handler.handle_announcement_signatures(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_announcement_signatures(&their_node_id, &msg);
},
wire::Message::ChannelAnnouncement(msg) => {
if self.message_handler.route_handler.handle_channel_announcement(&msg)
}
},
wire::Message::ChannelUpdate(msg) => {
- self.message_handler.chan_handler.handle_channel_update(&peer.their_node_id.unwrap(), &msg);
+ self.message_handler.chan_handler.handle_channel_update(&their_node_id, &msg);
if self.message_handler.route_handler.handle_channel_update(&msg)
.map_err(|e| -> MessageHandlingError { e.into() })? {
should_forward = Some(wire::Message::ChannelUpdate(msg));
}
},
wire::Message::QueryShortChannelIds(msg) => {
- self.message_handler.route_handler.handle_query_short_channel_ids(&peer.their_node_id.unwrap(), msg)?;
+ self.message_handler.route_handler.handle_query_short_channel_ids(&their_node_id, msg)?;
},
wire::Message::ReplyShortChannelIdsEnd(msg) => {
- self.message_handler.route_handler.handle_reply_short_channel_ids_end(&peer.their_node_id.unwrap(), msg)?;
+ self.message_handler.route_handler.handle_reply_short_channel_ids_end(&their_node_id, msg)?;
},
wire::Message::QueryChannelRange(msg) => {
- self.message_handler.route_handler.handle_query_channel_range(&peer.their_node_id.unwrap(), msg)?;
+ self.message_handler.route_handler.handle_query_channel_range(&their_node_id, msg)?;
},
wire::Message::ReplyChannelRange(msg) => {
- self.message_handler.route_handler.handle_reply_channel_range(&peer.their_node_id.unwrap(), msg)?;
+ self.message_handler.route_handler.handle_reply_channel_range(&their_node_id, msg)?;
},
- wire::Message::GossipTimestampFilter(_msg) => {
- // TODO: handle message
+
+ // Onion message:
+ wire::Message::OnionMessage(msg) => {
+ self.message_handler.onion_message_handler.handle_onion_message(&their_node_id, &msg);
},
// Unknown messages:
log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
},
wire::Message::Custom(custom) => {
- self.custom_message_handler.handle_custom_message(custom, &peer.their_node_id.unwrap())?;
+ self.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
},
};
Ok(should_forward)
}
- fn forward_broadcast_msg(&self, peers: &mut PeerHolder<Descriptor>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
+ fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
match msg {
wire::Message::ChannelAnnouncement(ref msg) => {
- log_trace!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
+ log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
let encoded_msg = encode_msg!(msg);
- for (_, peer) in peers.peers.iter_mut() {
+ for (_, peer_mutex) in peers.iter() {
+ let mut peer = peer_mutex.lock().unwrap();
if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP {
- log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+ if peer.buffer_full_drop_gossip_broadcast() {
+ log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) ||
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
continue;
}
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..]));
+ self.enqueue_encoded_gossip_broadcast(&mut *peer, encoded_msg.clone());
}
},
wire::Message::NodeAnnouncement(ref msg) => {
- log_trace!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
+ log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
let encoded_msg = encode_msg!(msg);
- for (_, peer) in peers.peers.iter_mut() {
+ for (_, peer_mutex) in peers.iter() {
+ let mut peer = peer_mutex.lock().unwrap();
if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
!peer.should_forward_node_announcement(msg.contents.node_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP {
- log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+ if peer.buffer_full_drop_gossip_broadcast() {
+ log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) {
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
continue;
}
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..]));
+ self.enqueue_encoded_gossip_broadcast(&mut *peer, encoded_msg.clone());
}
},
wire::Message::ChannelUpdate(ref msg) => {
- log_trace!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
+ log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
let encoded_msg = encode_msg!(msg);
- for (_, peer) in peers.peers.iter_mut() {
+ for (_, peer_mutex) in peers.iter() {
+ let mut peer = peer_mutex.lock().unwrap();
if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP {
- log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+ if peer.buffer_full_drop_gossip_broadcast() {
+ log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
continue;
}
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..]));
+ self.enqueue_encoded_gossip_broadcast(&mut *peer, encoded_msg.clone());
}
},
_ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"),
/// You don't have to call this function explicitly if you are using [`lightning-net-tokio`]
/// or one of the other clients provided in our language bindings.
///
+ /// Note that if there are any other calls to this function waiting on lock(s) this may return
+ /// without doing any work. All available events that need handling will be handled before the
+ /// other calls return.
+ ///
/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
/// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
/// [`send_data`]: SocketDescriptor::send_data
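+ ///
+ /// A minimal sketch of driving event processing from a background loop (the
+ /// `peer_manager` handle and the one-second cadence below are illustrative
+ /// assumptions, not requirements of this API):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     // Flush any messages the handlers have queued since the last call.
+ ///     peer_manager.process_events();
+ ///     std::thread::sleep(std::time::Duration::from_secs(1));
+ /// }
+ /// ```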
pub fn process_events(&self) {
+ let mut _single_processor_lock = self.event_processing_lock.try_lock();
+ if _single_processor_lock.is_err() {
+ // While we could wake the older sleeper here with a CV and make more even waiting
+ // times, that would be a lot of overengineering for a simple "reduce total waiter
+ // count" goal.
+ match self.blocked_event_processors.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) {
+ Err(val) => {
+ debug_assert!(val, "compare_exchange failed spuriously?");
+ return;
+ },
+ Ok(val) => {
+ debug_assert!(!val, "compare_exchange succeeded spuriously?");
+ // We're the only waiter; the running process_events may have emptied the
+ // pending events "long" ago and there may be new events for us to process.
+ // Wait until it's done and process any leftover events before returning.
+ _single_processor_lock = Ok(self.event_processing_lock.lock().unwrap());
+ self.blocked_event_processors.store(false, Ordering::Release);
+ }
+ }
+ }
+
+ let mut peers_to_disconnect = HashMap::new();
+ let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
+ events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
+
{
// TODO: There are some DoS attacks here where you can flood someone's outbound send
// buffer by doing things like announcing channels on another node. We should be willing to
// drop optional-ish messages when send buffers get full!
- let mut peers_lock = self.peers.lock().unwrap();
- let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
- events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
- let peers = &mut *peers_lock;
+ let peers_lock = self.peers.read().unwrap();
+ let peers = &*peers_lock;
macro_rules! get_peer_for_forwarding {
($node_id: expr) => {
{
- match peers.node_id_to_descriptor.get($node_id) {
- Some(descriptor) => match peers.peers.get_mut(&descriptor) {
- Some(peer) => {
- if peer.their_features.is_none() {
+ if peers_to_disconnect.get($node_id).is_some() {
+ // If we've "disconnected" this peer, do not send to it.
+ continue;
+ }
+ let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
+ match descriptor_opt {
+ Some(descriptor) => match peers.get(&descriptor) {
+ Some(peer_mutex) => {
+ let peer_lock = peer_mutex.lock().unwrap();
+ if peer_lock.their_features.is_none() {
continue;
}
- peer
+ peer_lock
},
- None => panic!("Inconsistent peers set state!"),
+ None => {
+ debug_assert!(false, "Inconsistent peers set state!");
+ continue;
+ }
},
None => {
continue;
log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.temporary_channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.temporary_channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
// TODO: If the peer is gone we should generate a DiscardFunding event
// indicating to the wallet that they should just throw away this funding transaction
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
- MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
+ MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
update_fulfill_htlcs.len(),
update_fail_htlcs.len(),
log_bytes!(commitment_signed.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
+ let mut peer = get_peer_for_forwarding!(node_id);
for msg in update_add_htlcs {
- self.enqueue_message(peer, msg);
+ self.enqueue_message(&mut *peer, msg);
}
for msg in update_fulfill_htlcs {
- self.enqueue_message(peer, msg);
+ self.enqueue_message(&mut *peer, msg);
}
for msg in update_fail_htlcs {
- self.enqueue_message(peer, msg);
+ self.enqueue_message(&mut *peer, msg);
}
for msg in update_fail_malformed_htlcs {
- self.enqueue_message(peer, msg);
+ self.enqueue_message(&mut *peer, msg);
}
if let &Some(ref msg) = update_fee {
- self.enqueue_message(peer, msg);
+ self.enqueue_message(&mut *peer, msg);
}
- self.enqueue_message(peer, commitment_signed);
+ self.enqueue_message(&mut *peer, commitment_signed);
},
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
+ log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
+ log_pubkey!(node_id),
+ msg.contents.short_channel_id);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), update_msg);
},
MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
- if self.message_handler.route_handler.handle_channel_announcement(&msg).is_ok() && self.message_handler.route_handler.handle_channel_update(&update_msg).is_ok() {
- self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None);
- self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None);
+ match self.message_handler.route_handler.handle_channel_announcement(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
+ _ => {},
}
- },
- MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
- log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler");
- if self.message_handler.route_handler.handle_node_announcement(&msg).is_ok() {
- self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None);
+ match self.message_handler.route_handler.handle_channel_update(&update_msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None),
+ _ => {},
}
},
MessageSendEvent::BroadcastChannelUpdate { msg } => {
log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
- if self.message_handler.route_handler.handle_channel_update(&msg).is_ok() {
- self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None);
+ match self.message_handler.route_handler.handle_channel_update(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
+ _ => {},
}
},
MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
log_pubkey!(node_id), msg.contents.short_channel_id);
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::HandleError { ref node_id, ref action } => {
match *action {
msgs::ErrorAction::DisconnectPeer { ref msg } => {
- if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) {
- if let Some(mut peer) = peers.peers.remove(&descriptor) {
- if let Some(ref msg) = *msg {
- log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
- log_pubkey!(node_id),
- msg.data);
- self.enqueue_message(&mut peer, msg);
- // This isn't guaranteed to work, but if there is enough free
- // room in the send buffer, put the error message there...
- self.do_attempt_write_data(&mut descriptor, &mut peer);
- } else {
- log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
- }
- }
- descriptor.disconnect_socket();
- self.message_handler.chan_handler.peer_disconnected(&node_id, false);
- }
+ // We do not have the peers write lock, so we just store that we're
+ // about to disconnect the peer and do it after we finish
+ // processing most messages.
+ peers_to_disconnect.insert(*node_id, msg.clone());
},
msgs::ErrorAction::IgnoreAndLog(level) => {
log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
},
+ msgs::ErrorAction::IgnoreDuplicateGossip => {},
msgs::ErrorAction::IgnoreError => {
log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
},
log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
+ log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
+ log_pubkey!(node_id),
+ msg.data);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
}
},
MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+ log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
log_pubkey!(node_id),
msg.short_channel_ids.len(),
msg.first_blocknum,
msg.number_of_blocks,
msg.sync_complete);
- self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ }
+ MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
}
}
for (node_id, msg) in self.custom_message_handler.get_and_clear_pending_msg() {
- self.enqueue_message(get_peer_for_forwarding!(&node_id), &msg);
+ if peers_to_disconnect.get(&node_id).is_some() { continue; }
+ self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
}
- for (descriptor, peer) in peers.peers.iter_mut() {
- self.do_attempt_write_data(&mut (*descriptor).clone(), peer);
+ for (descriptor, peer_mutex) in peers.iter() {
+ self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+ }
+ }
+ if !peers_to_disconnect.is_empty() {
+ let mut peers_lock = self.peers.write().unwrap();
+ let peers = &mut *peers_lock;
+ for (node_id, msg) in peers_to_disconnect.drain() {
+ // Note that since we are holding the peers *write* lock we can
+ // remove from node_id_to_descriptor immediately (as no other
+ // thread can be holding the peer lock if we have the global write
+ // lock).
+
+ if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+ if let Some(peer_mutex) = peers.remove(&descriptor) {
+ if let Some(msg) = msg {
+ log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+ log_pubkey!(node_id),
+ msg.data);
+ let mut peer = peer_mutex.lock().unwrap();
+ self.enqueue_message(&mut *peer, &msg);
+ // This isn't guaranteed to work, but if there is enough free
+ // room in the send buffer, put the error message there...
+ self.do_attempt_write_data(&mut descriptor, &mut *peer);
+ } else {
+ log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
+ }
+ }
+ descriptor.disconnect_socket();
+ self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
+ }
}
}
}
}
fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
- let mut peers = self.peers.lock().unwrap();
- let peer_option = peers.peers.remove(descriptor);
+ let mut peers = self.peers.write().unwrap();
+ let peer_option = peers.remove(descriptor);
match peer_option {
None => {
// This is most likely a simple race condition where the user found that the socket
// was disconnected, then we told the user to `disconnect_socket()`, then they
// called this method. Either way we're disconnected, return.
},
- Some(peer) => {
- match peer.their_node_id {
- Some(node_id) => {
- log_trace!(self.logger,
- "Handling disconnection of peer {}, with {}future connection to the peer possible.",
- log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
- peers.node_id_to_descriptor.remove(&node_id);
- self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
- },
- None => {}
+ Some(peer_lock) => {
+ let peer = peer_lock.lock().unwrap();
+ if let Some(node_id) = peer.their_node_id {
+ log_trace!(self.logger,
+ "Handling disconnection of peer {}, with {}future connection to the peer possible.",
+ log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
+ self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+ self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
}
}
};
///
/// [`disconnect_socket`]: SocketDescriptor::disconnect_socket
pub fn disconnect_by_node_id(&self, node_id: PublicKey, no_connection_possible: bool) {
- let mut peers_lock = self.peers.lock().unwrap();
- if let Some(mut descriptor) = peers_lock.node_id_to_descriptor.remove(&node_id) {
+ let mut peers_lock = self.peers.write().unwrap();
+ if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
- peers_lock.peers.remove(&descriptor);
+ peers_lock.remove(&descriptor);
self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
+ descriptor.disconnect_socket();
+ }
+ }
+
+ /// Disconnects all currently-connected peers. This is useful on platforms where there may be
+ /// an indication that TCP sockets have stalled even if we weren't around to time them out
+ /// using regular ping/pongs.
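+ ///
+ /// A brief sketch (the `peer_manager` handle is an assumed, already-constructed
+ /// value; the wake-from-sleep trigger is just an example):
+ ///
+ /// ```ignore
+ /// // e.g. after the OS reports the device woke from sleep:
+ /// peer_manager.disconnect_all_peers();
+ /// // The application layer can now re-establish connections from scratch.
+ /// ```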
+ pub fn disconnect_all_peers(&self) {
+ let mut peers_lock = self.peers.write().unwrap();
+ self.node_id_to_descriptor.lock().unwrap().clear();
+ let peers = &mut *peers_lock;
+ for (mut descriptor, peer) in peers.drain() {
+ if let Some(node_id) = peer.lock().unwrap().their_node_id {
+ log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
+ self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
+ }
descriptor.disconnect_socket();
}
}
+ /// This is called when we're blocked on sending additional gossip messages until we receive a
+ /// pong. If we aren't waiting on a pong, we take this opportunity to send a ping (setting
+ /// `awaiting_pong_timer_tick_intervals` to a special flag value to indicate this).
+ fn maybe_send_extra_ping(&self, peer: &mut Peer) {
+ if peer.awaiting_pong_timer_tick_intervals == 0 {
+ peer.awaiting_pong_timer_tick_intervals = -1;
+ let ping = msgs::Ping {
+ ponglen: 0,
+ byteslen: 64,
+ };
+ self.enqueue_message(peer, &ping);
+ }
+ }
+
/// Send pings to each peer and disconnect those which did not respond to the last round of
/// pings.
///
- /// This may be called on any timescale you want, however, roughly once every five to ten
- /// seconds is preferred. The call rate determines both how often we send a ping to our peers
- /// and how much time they have to respond before we disconnect them.
+ /// This may be called on any timescale you want, however, roughly once every ten seconds is
+ /// preferred. The call rate determines both how often we send a ping to our peers and how much
+ /// time they have to respond before we disconnect them.
///
/// May call [`send_data`] on all [`SocketDescriptor`]s. Thus, be very careful with reentrancy
/// issues!
///
/// [`send_data`]: SocketDescriptor::send_data
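+ ///
+ /// A minimal sketch of a timer loop (the ten-second interval follows the guidance
+ /// above; `peer_manager` is an assumed, already-constructed handle):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(10));
+ ///     // Ping peers and drop any that haven't responded since the previous tick.
+ ///     peer_manager.timer_tick_occurred();
+ /// }
+ /// ```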
pub fn timer_tick_occurred(&self) {
- let mut peers_lock = self.peers.lock().unwrap();
+ let mut descriptors_needing_disconnect = Vec::new();
{
- let peers = &mut *peers_lock;
- let node_id_to_descriptor = &mut peers.node_id_to_descriptor;
- let peers = &mut peers.peers;
- let mut descriptors_needing_disconnect = Vec::new();
+ let peers_lock = self.peers.read().unwrap();
+
+ for (descriptor, peer_mutex) in peers_lock.iter() {
+ let mut peer = peer_mutex.lock().unwrap();
+ if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
+ // The peer needs to complete its handshake before we can exchange messages. We
+ // give peers one timer tick to complete the handshake, reusing
+ // `awaiting_pong_timer_tick_intervals` to track the number of timer ticks taken
+ // for handshake completion.
+ if peer.awaiting_pong_timer_tick_intervals != 0 {
+ descriptors_needing_disconnect.push(descriptor.clone());
+ } else {
+ peer.awaiting_pong_timer_tick_intervals = 1;
+ }
+ continue;
+ }
- peers.retain(|descriptor, peer| {
- if peer.awaiting_pong {
+ if peer.awaiting_pong_timer_tick_intervals == -1 {
+ // Magic value set in `maybe_send_extra_ping`.
+ peer.awaiting_pong_timer_tick_intervals = 1;
+ peer.received_message_since_timer_tick = false;
+ continue;
+ }
+
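+ // Disconnect peers we pinged but which sent us nothing since the last timer tick,
+ // as well as peers that have left a ping unanswered for more ticks than we allow
+ // for buffer draining (scaled by the number of connected peers).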
+ if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
+ || peer.awaiting_pong_timer_tick_intervals as u64 >
+ MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
+ {
descriptors_needing_disconnect.push(descriptor.clone());
- match peer.their_node_id {
- Some(node_id) => {
- log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
- node_id_to_descriptor.remove(&node_id);
- self.message_handler.chan_handler.peer_disconnected(&node_id, false);
- }
- None => {
- // This can't actually happen as we should have hit
- // is_ready_for_encryption() previously on this same peer.
- unreachable!();
- },
- }
- return false;
+ continue;
}
+ peer.received_message_since_timer_tick = false;
- if !peer.channel_encryptor.is_ready_for_encryption() {
- // The peer needs to complete its handshake before we can exchange messages
- return true;
+ if peer.awaiting_pong_timer_tick_intervals > 0 {
+ peer.awaiting_pong_timer_tick_intervals += 1;
+ continue;
}
+ peer.awaiting_pong_timer_tick_intervals = 1;
let ping = msgs::Ping {
ponglen: 0,
byteslen: 64,
};
- self.enqueue_message(peer, &ping);
-
- let mut descriptor_clone = descriptor.clone();
- self.do_attempt_write_data(&mut descriptor_clone, peer);
+ self.enqueue_message(&mut *peer, &ping);
+ self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer);
+ }
+ }
- peer.awaiting_pong = true;
- true
- });
+ if !descriptors_needing_disconnect.is_empty() {
+ {
+ let mut peers_lock = self.peers.write().unwrap();
+ for descriptor in descriptors_needing_disconnect.iter() {
+ if let Some(peer) = peers_lock.remove(descriptor) {
+ if let Some(node_id) = peer.lock().unwrap().their_node_id {
+ log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
+ self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+ self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
+ }
+ }
+ }
+ }
for mut descriptor in descriptors_needing_disconnect.drain(..) {
descriptor.disconnect_socket();
}
}
}
+
+ #[allow(dead_code)]
+ // Messages of up to 64KB should never end up more than half full with addresses, as that would
+ // be absurd. We ensure this by checking that at least 100 (our stated public contract on when
+ // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
+ // message...
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+ #[deny(const_err)]
+ #[allow(dead_code)]
+ // ...by failing to compile if the number of addresses that would be half of a message is
+ // smaller than 100:
+ const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 100;
+
+ /// Generates a signed node_announcement from the given arguments, sending it to all connected
+ /// peers. Note that peers will likely ignore this message unless we have at least one public
+ /// channel which has at least six confirmations on-chain.
+ ///
+ /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
+ /// node to humans. They carry no in-protocol meaning.
+ ///
+ /// `addresses` represent the set (possibly empty) of socket addresses on which this node
+ /// accepts incoming connections. These will be included in the node_announcement, publicly
+ /// tying these addresses together and to this node. If you wish to preserve user privacy,
+ /// addresses should likely contain only Tor Onion addresses.
+ ///
+ /// Panics if `addresses` is absurdly large (more than 100).
+ ///
+ /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
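+ ///
+ /// A minimal sketch of announcing a node color and alias with no public addresses
+ /// (the byte values are illustrative; an alias is typically a UTF-8 name padded to
+ /// 32 bytes, and `peer_manager` is an assumed, already-constructed handle):
+ ///
+ /// ```ignore
+ /// let rgb = [0x00, 0x00, 0xff];
+ /// let mut alias = [0u8; 32];
+ /// alias[..7].copy_from_slice(b"my-node");
+ /// peer_manager.broadcast_node_announcement(rgb, alias, Vec::new());
+ /// ```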
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
+ if addresses.len() > 100 {
+ panic!("More than half the message size was taken up by public addresses!");
+ }
+
+ // While all existing nodes handle unsorted addresses just fine, the spec requires that
+ // addresses be sorted for future compatibility.
+ addresses.sort_by_key(|addr| addr.get_id());
+
+ let features = self.message_handler.chan_handler.provided_node_features()
+ .or(self.message_handler.route_handler.provided_node_features())
+ .or(self.message_handler.onion_message_handler.provided_node_features());
+ let announcement = msgs::UnsignedNodeAnnouncement {
+ features,
+ timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel),
+ node_id: PublicKey::from_secret_key(&self.secp_ctx, &self.our_node_secret),
+ rgb, alias, addresses,
+ excess_address_data: Vec::new(),
+ excess_data: Vec::new(),
+ };
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+ let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_node_secret);
+
+ let msg = msgs::NodeAnnouncement {
+ signature: node_announce_sig,
+ contents: announcement
+ };
+
+ log_debug!(self.logger, "Broadcasting NodeAnnouncement after passing it to our own RoutingMessageHandler.");
+ let _ = self.message_handler.route_handler.handle_node_announcement(&msg);
+ self.forward_broadcast_msg(&*self.peers.read().unwrap(), &wire::Message::NodeAnnouncement(msg), None);
+ }
+}
+
+fn is_gossip_msg(type_id: u16) -> bool {
+ match type_id {
+ msgs::ChannelAnnouncement::TYPE |
+ msgs::ChannelUpdate::TYPE |
+ msgs::NodeAnnouncement::TYPE |
+ msgs::QueryChannelRange::TYPE |
+ msgs::ReplyChannelRange::TYPE |
+ msgs::QueryShortChannelIds::TYPE |
+ msgs::ReplyShortChannelIdsEnd::TYPE => true,
+ _ => false
+ }
}
#[cfg(test)]
mod tests {
- use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
- use ln::msgs;
- use util::events;
- use util::test_utils;
+ use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
+ use crate::ln::{msgs, wire};
+ use crate::ln::msgs::NetAddress;
+ use crate::util::events;
+ use crate::util::test_utils;
use bitcoin::secp256k1::Secp256k1;
- use bitcoin::secp256k1::key::{SecretKey, PublicKey};
+ use bitcoin::secp256k1::{SecretKey, PublicKey};
- use prelude::*;
- use sync::{Arc, Mutex};
+ use crate::prelude::*;
+ use crate::sync::{Arc, Mutex};
use core::sync::atomic::Ordering;
#[derive(Clone)]
cfgs
}
- fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> {
+ fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> {
let mut peers = Vec::new();
for i in 0..peer_count {
let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
let ephemeral_bytes = [i as u8; 32];
- let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler };
- let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {});
+ let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, onion_message_handler: IgnoringMessageHandler {} };
+ let peer = PeerManager::new(msg_handler, node_secret, 0, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {});
peers.push(peer);
}
peers
}
- fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) {
+ fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) {
let secp_ctx = Secp256k1::new();
let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret);
let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
- let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone()).unwrap();
- peer_a.new_inbound_connection(fd_a.clone()).unwrap();
+ let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+ peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
peer_a.process_events();
- assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
peer_b.process_events();
- assert_eq!(peer_a.read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+ let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_a.read_event(&mut fd_a, &b_data).unwrap(), false);
+
+ peer_a.process_events();
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
(fd_a.clone(), fd_b.clone())
}
let chan_handler = test_utils::TestChannelMessageHandler::new();
let mut peers = create_network(2, &cfgs);
establish_connection(&peers[0], &peers[1]);
- assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
let secp_ctx = Secp256k1::new();
let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
peers[0].message_handler.chan_handler = &chan_handler;
peers[0].process_events();
- assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 0);
+ }
+
+ #[test]
+ fn test_send_simple_msg() {
+ // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
+ // pushes a message from one peer to another.
+ let cfgs = create_peermgr_cfgs(2);
+ let a_chan_handler = test_utils::TestChannelMessageHandler::new();
+ let b_chan_handler = test_utils::TestChannelMessageHandler::new();
+ let mut peers = create_network(2, &cfgs);
+ let (fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
+
+ let secp_ctx = Secp256k1::new();
+ let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
+
+ let msg = msgs::Shutdown { channel_id: [42; 32], scriptpubkey: bitcoin::Script::new() };
+ a_chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::SendShutdown {
+ node_id: their_id, msg: msg.clone()
+ });
+ peers[0].message_handler.chan_handler = &a_chan_handler;
+
+ b_chan_handler.expect_receive_msg(wire::Message::Shutdown(msg));
+ peers[1].message_handler.chan_handler = &b_chan_handler;
+
+ peers[0].process_events();
+
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
+ }
+
+ #[test]
+ fn test_disconnect_all_peer() {
+ // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
+ // then calls disconnect_all_peers
+ let cfgs = create_peermgr_cfgs(2);
+ let peers = create_network(2, &cfgs);
+ establish_connection(&peers[0], &peers[1]);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
+
+ peers[0].disconnect_all_peers();
+ assert_eq!(peers[0].peers.read().unwrap().len(), 0);
}
#[test]
let cfgs = create_peermgr_cfgs(2);
let peers = create_network(2, &cfgs);
establish_connection(&peers[0], &peers[1]);
- assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
// peers[0] awaiting_pong is set to true, but the Peer is still connected
peers[0].timer_tick_occurred();
peers[0].process_events();
- assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
// Since timer_tick_occurred() is called again when awaiting_pong is true, all Peers are disconnected
peers[0].timer_tick_occurred();
peers[0].process_events();
- assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+ assert_eq!(peers[0].peers.read().unwrap().len(), 0);
}
#[test]
// than can fit into a peer's buffer).
let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);
- // Make each peer to read the messages that the other peer just wrote to them.
- peers[0].process_events();
- peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap();
- peers[1].process_events();
- peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap();
+ // Make each peer read the messages that the other peer just wrote to them. Note that
+ // due to the max-message-before-ping limits this may take a few iterations to complete.
+ for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 {
+ peers[1].process_events();
+ let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+ assert!(!a_read_data.is_empty());
+
+ peers[0].read_event(&mut fd_a, &a_read_data).unwrap();
+ peers[0].process_events();
+
+ let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert!(!b_read_data.is_empty());
+ peers[1].read_event(&mut fd_b, &b_read_data).unwrap();
+
+ peers[0].process_events();
+ assert_eq!(fd_a.outbound_data.lock().unwrap().len(), 0, "Until A receives data, it shouldn't send more messages");
+ }
// Check that each peer has received the expected number of channel updates and channel
// announcements.
- assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
- assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
- assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
- assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
+ assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+ assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
+ assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+ assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
+ }
+
+ #[test]
+ fn test_handshake_timeout() {
+ // Tests that we time out a peer still waiting on handshake completion after a full timer
+ // tick.
+ let cfgs = create_peermgr_cfgs(2);
+ cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release);
+ cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release);
+ let peers = create_network(2, &cfgs);
+
+ let secp_ctx = Secp256k1::new();
+ let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret);
+ let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+ let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+ let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+ peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();
+
+ // If we get a single timer tick before completion, that's fine
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
+ peers[0].timer_tick_occurred();
+ assert_eq!(peers[0].peers.read().unwrap().len(), 1);
+
+ assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false);
+ peers[0].process_events();
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
+ peers[1].process_events();
+
+ // ...but if we get a second timer tick, we should disconnect the peer
+ peers[0].timer_tick_occurred();
+ assert_eq!(peers[0].peers.read().unwrap().len(), 0);
+
+ let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+ assert!(peers[0].read_event(&mut fd_a, &b_data).is_err());
+ }
+
+ #[test]
+ fn test_filter_addresses(){
+ // Tests the filter_addresses function.
+
+ // For (10/8)
+ let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (0/8)
+ let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (100.64/10)
+ let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (127/8)
+ let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (169.254/16)
+ let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (172.16/12)
+ let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (192.168/16)
+ let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (192.88.99/24)
+ let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+		// For other (publicly routable) IPv4 addresses
+ let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+
+ // For (2000::/3)
+ let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+ let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+
+		// For other IPv6 addresses (outside 2000::/3, so they are filtered)
+ let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+ let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
+ assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+ // For (None)
+ assert_eq!(filter_addresses(None), None);
}
}