X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fpeer_handler.rs;h=d3ac2ebe9a39bf2e918f58dbf4d012ac0434ae99;hb=952cee4a168d266d7bdd43d21ac71a3e3de46a53;hp=234b509564dc2b7440eb4d1f0fd1028cf995d0a6;hpb=2e02aa7dab1228863b602b7db7b67d8bd7dbee9d;p=rust-lightning

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 234b5095..d3ac2ebe 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -21,29 +21,41 @@ use ln::features::InitFeatures;
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, LightningError, RoutingMessageHandler};
 use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
-use util::ser::{VecWriter, Writeable};
+use util::ser::{VecWriter, Writeable, Writer};
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
 use ln::wire;
 use ln::wire::Encode;
-use util::byte_utils;
+use util::atomic_counter::AtomicCounter;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::logger::Logger;
-use routing::network_graph::NetGraphMsgHandler;
+use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
 
 use prelude::*;
 use io;
 use alloc::collections::LinkedList;
-use alloc::fmt::Debug;
 use sync::{Arc, Mutex};
-use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
+use core::convert::Infallible;
 #[cfg(feature = "std")] use std::error;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
 use bitcoin::hashes::{HashEngine, Hash};
 
+/// Handler for BOLT 1-compliant custom messages.
+pub trait CustomMessageHandler: wire::CustomMessageReader {
+	/// Called with the message type that was received and the buffer to be read.
+	/// Can return a `LightningError` if the message could not be handled.
+	fn handle_custom_message(&self, msg: Self::CustomMessage, sender_node_id: &PublicKey) -> Result<(), LightningError>;
+
+	/// Gets the list of pending messages which were generated by the custom message
+	/// handler, clearing the list in the process. Each tuple's first element must
+	/// correspond to the intended recipient's node id. If no connection to one of the
+	/// specified nodes exists, the message is simply not sent.
+	fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
+}
+
 /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
 /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler.
 pub struct IgnoringMessageHandler{}
@@ -54,7 +66,6 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
 	fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 	fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 	fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
-	fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
 	fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
 	fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
@@ -69,6 +80,35 @@ impl Deref for IgnoringMessageHandler {
 	fn deref(&self) -> &Self { self }
 }
 
+// Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a
+// method that takes self for it.
+impl wire::Type for Infallible { + fn type_id(&self) -> u16 { + unreachable!(); + } +} +impl Writeable for Infallible { + fn write(&self, _: &mut W) -> Result<(), io::Error> { + unreachable!(); + } +} + +impl wire::CustomMessageReader for IgnoringMessageHandler { + type CustomMessage = Infallible; + fn read(&self, _message_type: u16, _buffer: &mut R) -> Result, msgs::DecodeError> { + Ok(None) + } +} + +impl CustomMessageHandler for IgnoringMessageHandler { + fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: &PublicKey) -> Result<(), LightningError> { + // Since we always return `None` in the read the handle method should never be called. + unreachable!(); + } + + fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() } +} + /// A dummy struct which implements `ChannelMessageHandler` without having any channels. /// You can provide one of these as the route_handler in a MessageHandler. pub struct ErroringMessageHandler { @@ -245,6 +285,10 @@ enum InitSyncTracker{ NodesSyncing(PublicKey), } +/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop +/// forwarding gossip messages to peers altogether. +const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2; + /// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until /// we have fewer than this many messages in the outbound buffer again. /// We also use this as the target number of outbound gossip messages to keep in the write buffer, @@ -252,7 +296,29 @@ enum InitSyncTracker{ const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10; /// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to /// the peer. -const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = 20; +const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO; + +/// If we've sent a ping, and are still awaiting a response, we may need to churn our way through +/// the socket receive buffer before receiving the ping. +/// +/// On a fairly old Arm64 board, with Linux defaults, this can take as long as 20 seconds, not +/// including any network delays, outbound traffic, or the same for messages from other peers. +/// +/// Thus, to avoid needlessly disconnecting a peer, we allow a peer to take this many timer ticks +/// per connected peer to respond to a ping, as long as they send us at least one message during +/// each tick, ensuring we aren't actually just disconnected. +/// With a timer tick interval of ten seconds, this translates to about 40 seconds per connected +/// peer. +/// +/// When we improve parallelism somewhat we should reduce this to e.g. this many timer ticks per +/// two connected peers, assuming most LDK-running systems have at least two cores. +const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4; + +/// This is the minimum number of messages we expect a peer to be able to handle within one timer +/// tick. Once we have sent this many messages since the last ping, we send a ping right away to +/// ensures we don't just fill up our send buffer and leave the peer with too many messages to +/// process before the next ping. 
+const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32; struct Peer { channel_encryptor: PeerChannelEncryptor, @@ -269,7 +335,9 @@ struct Peer { sync_status: InitSyncTracker, - awaiting_pong: bool, + msgs_sent_since_pong: usize, + awaiting_pong_timer_tick_intervals: i8, + received_message_since_timer_tick: bool, } impl Peer { @@ -303,18 +371,12 @@ struct PeerHolder { node_id_to_descriptor: HashMap, } -#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] -fn _check_usize_is_32_or_64() { - // See below, less than 32 bit pointers may be unsafe here! - unsafe { mem::transmute::<*const usize, [u8; 4]>(panic!()); } -} - /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g. /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case /// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents /// issues such as overly long function definitions. -pub type SimpleArcPeerManager = PeerManager>, Arc, Arc>>, Arc>; +pub type SimpleArcPeerManager = PeerManager>, Arc, Arc, Arc>>, Arc, Arc>; /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference /// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't @@ -322,7 +384,7 @@ pub type SimpleArcPeerManager = PeerManager = PeerManager, &'e NetGraphMsgHandler<&'g C, &'f L>, &'f L>; +pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager, &'e NetGraphMsgHandler<&'g NetworkGraph, &'h C, &'f L>, &'f L, IgnoringMessageHandler>; /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls /// socket events into messages which it passes on to its [`MessageHandler`]. @@ -343,19 +405,18 @@ pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, SD, M, T, F, C, L> = P /// you're using lightning-net-tokio. /// /// [`read_event`]: PeerManager::read_event -pub struct PeerManager where +pub struct PeerManager where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, - L::Target: Logger { + L::Target: Logger, + CMH::Target: CustomMessageHandler { message_handler: MessageHandler, peers: Mutex>, our_node_secret: SecretKey, ephemeral_key_midstate: Sha256Engine, + custom_message_handler: CMH, - // Usize needs to be at least 32 bits to avoid overflowing both low and high. If usize is 64 - // bits we will never realistically count into high: - peer_counter_low: AtomicUsize, - peer_counter_high: AtomicUsize, + peer_counter: AtomicCounter, logger: L, } @@ -385,7 +446,7 @@ macro_rules! encode_msg { }} } -impl PeerManager where +impl PeerManager where CM::Target: ChannelMessageHandler, L::Target: Logger { /// Constructs a new PeerManager with the given ChannelMessageHandler. No routing message @@ -399,11 +460,11 @@ impl PeerManager PeerManager where +impl PeerManager where RM::Target: RoutingMessageHandler, L::Target: Logger { /// Constructs a new PeerManager with the given RoutingMessageHandler. No channel message @@ -419,18 +480,30 @@ impl PeerManager" for an optional pubkey. +/// This works around `format!()` taking a reference to each argument, preventing +/// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling +/// due to lifetime errors. 
+struct OptionalFromDebugger<'a>(&'a Option); +impl core::fmt::Display for OptionalFromDebugger<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + if let Some(node_id) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) } } } -impl PeerManager where +impl PeerManager where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, - L::Target: Logger { + L::Target: Logger, + CMH::Target: CustomMessageHandler { /// Constructs a new PeerManager with the given message handlers and node_id secret key /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be /// cryptographically secure random bytes. - pub fn new(message_handler: MessageHandler, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self { + pub fn new(message_handler: MessageHandler, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self { let mut ephemeral_key_midstate = Sha256::engine(); ephemeral_key_midstate.input(ephemeral_random_data); @@ -442,9 +515,9 @@ impl PeerManager PeerManager SecretKey { let mut ephemeral_hash = self.ephemeral_key_midstate.clone(); - let low = self.peer_counter_low.fetch_add(1, Ordering::AcqRel); - let high = if low == 0 { - self.peer_counter_high.fetch_add(1, Ordering::AcqRel) - } else { - self.peer_counter_high.load(Ordering::Acquire) - }; - ephemeral_hash.input(&byte_utils::le64_to_array(low as u64)); - ephemeral_hash.input(&byte_utils::le64_to_array(high as u64)); + let counter = self.peer_counter.get_increment(); + ephemeral_hash.input(&counter.to_le_bytes()); SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!") } @@ -507,7 +574,9 @@ impl PeerManager PeerManager PeerManager {}, InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => { @@ -599,6 +670,9 @@ impl PeerManager= BUFFER_DRAIN_MSGS_PER_TICK { + self.maybe_send_extra_ping(peer); + } if { let next_buff = match peer.pending_outbound_buffer.front() { @@ -674,14 +748,23 @@ impl PeerManager(&self, peer: &mut Peer, message: &M) { - let mut buffer = VecWriter(Vec::new()); + /// Append a message to a peer's pending outbound/write buffer + fn enqueue_encoded_message(&self, peer: &mut Peer, encoded_message: &Vec) { + peer.msgs_sent_since_pong += 1; + peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..])); + } + + /// Append a message to a peer's pending outbound/write buffer + fn enqueue_message(&self, peer: &mut Peer, message: &M) { + let mut buffer = VecWriter(Vec::with_capacity(2048)); wire::write(message, &mut buffer).unwrap(); // crash if the write failed - let encoded_message = buffer.0; - log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..])); + if is_gossip_msg(message.type_id()) { + log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())); + } else { + log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())) + } + self.enqueue_encoded_message(peer, &buffer.0); } fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result { @@ -721,25 +804,31 @@ impl PeerManager { //TODO: Try to push msg - log_debug!(self.logger, "Error handling message; disconnecting peer with: {}", e.err); + 
log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer.their_node_id), e.err); return Err(PeerHandleError{ no_connection_possible: false }); }, msgs::ErrorAction::IgnoreAndLog(level) => { - log_given_level!(self.logger, level, "Error handling message; ignoring: {}", e.err); + log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err); continue }, + msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these msgs::ErrorAction::IgnoreError => { - log_debug!(self.logger, "Error handling message; ignoring: {}", e.err); + log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err); continue; }, msgs::ErrorAction::SendErrorMessage { msg } => { - log_debug!(self.logger, "Error handling message; sending error message with: {}", e.err); + log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err); + self.enqueue_message(peer, &msg); + continue; + }, + msgs::ErrorAction::SendWarningMessage { msg, log_level } => { + log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err); self.enqueue_message(peer, &msg); continue; }, } } - }; + } } } @@ -777,6 +866,7 @@ impl PeerManager { let their_node_id = try_potential_handleerror!(peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..])); @@ -787,6 +877,7 @@ impl PeerManager { if peer.pending_read_is_header { @@ -806,30 +897,45 @@ impl PeerManager x, Err(e) => { match e { - msgs::DecodeError::UnknownVersion => return Err(PeerHandleError { no_connection_possible: false }), - msgs::DecodeError::UnknownRequiredFeature => { - log_trace!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!"); + // Note that to avoid recursion we never call + // `do_attempt_write_data` from here, causing + // the messages enqueued here to not actually + // be sent before the peer is disconnected. 
+ (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => { + log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!"); continue; } - msgs::DecodeError::InvalidValue => { + (msgs::DecodeError::UnsupportedCompression, _) => { + log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() }); + continue; + } + (_, Some(ty)) if is_gossip_msg(ty) => { + log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unreadable/bogus gossip message".to_owned() }); + continue; + } + (msgs::DecodeError::UnknownRequiredFeature, ty) => { + log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) }); + return Err(PeerHandleError { no_connection_possible: false }); + } + (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }), + (msgs::DecodeError::InvalidValue, _) => { log_debug!(self.logger, "Got an invalid value while deserializing message"); return Err(PeerHandleError { no_connection_possible: false }); } - msgs::DecodeError::ShortRead => { + (msgs::DecodeError::ShortRead, _) => { log_debug!(self.logger, "Deserialization failed due to shortness of message"); return Err(PeerHandleError { no_connection_possible: false }); } - msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError { no_connection_possible: false }), - msgs::DecodeError::Io(_) => return Err(PeerHandleError { no_connection_possible: false }), - msgs::DecodeError::UnsupportedCompression => { - log_trace!(self.logger, "We don't support zlib-compressed message fields, ignoring message"); - continue; - } + (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }), + (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }), } } }; @@ -869,8 +975,18 @@ impl PeerManager Result, MessageHandlingError> { - log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap())); + fn handle_message( + &self, + peer: &mut Peer, + message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage> + ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> { + if is_gossip_msg(message.type_id()) { + log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap())); + } else { + log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap())); + } + + peer.received_message_since_timer_tick = true; // Need an Init as first message if let wire::Message::Init(_) = message { @@ -892,7 +1008,7 @@ impl PeerManager PeerManager { + let mut data_is_printable = true; + for b in msg.data.bytes() { + if b < 32 || b > 126 { + data_is_printable = false; + break; + } + } + + if data_is_printable { + log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data); + } else { + log_debug!(self.logger, "Got warning message 
from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap())); + } + }, wire::Message::Ping(msg) => { if msg.ponglen < 65532 { @@ -934,7 +1065,8 @@ impl PeerManager { - peer.awaiting_pong = false; + peer.awaiting_pong_timer_tick_intervals = 0; + peer.msgs_sent_since_pong = 0; }, // Channel messages: @@ -1029,22 +1161,25 @@ impl PeerManager { - log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", msg_type); + wire::Message::Unknown(type_id) if message.is_even() => { + log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id); // Fail the channel if message is an even, unknown type as per BOLT #1. return Err(PeerHandleError{ no_connection_possible: true }.into()); }, - wire::Message::Unknown(msg_type) => { - log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", msg_type); - } + wire::Message::Unknown(type_id) => { + log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id); + }, + wire::Message::Custom(custom) => { + self.custom_message_handler.handle_custom_message(custom, &peer.their_node_id.unwrap())?; + }, }; Ok(should_forward) } - fn forward_broadcast_msg(&self, peers: &mut PeerHolder, msg: &wire::Message, except_node: Option<&PublicKey>) { + fn forward_broadcast_msg(&self, peers: &mut PeerHolder, msg: &wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) { match msg { wire::Message::ChannelAnnouncement(ref msg) => { - log_trace!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); + log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1052,8 +1187,10 @@ impl PeerManager OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { - log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + { + log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) || @@ -1063,11 +1200,11 @@ impl PeerManager { - log_trace!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); + log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1075,8 +1212,10 @@ impl PeerManager OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { - log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + { + log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) { @@ -1085,11 +1224,11 @@ impl PeerManager { - log_trace!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg); + log_gossip!(self.logger, "Sending 
message to all peers except {:?}: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1097,14 +1236,16 @@ impl PeerManager OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { - log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + { + log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..])); + self.enqueue_encoded_message(peer, &encoded_msg); } }, _ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"), @@ -1118,6 +1259,9 @@ impl PeerManager PeerManager { - { - match peers.node_id_to_descriptor.get($node_id) { - Some(descriptor) => match peers.peers.get_mut(&descriptor) { - Some(peer) => { - if peer.their_features.is_none() { - continue; - } - peer - }, - None => panic!("Inconsistent peers set state!"), - }, - None => { - continue; + macro_rules! get_peer_for_forwarding { + ($node_id: expr) => { + { + match peers.node_id_to_descriptor.get($node_id) { + Some(descriptor) => match peers.peers.get_mut(&descriptor) { + Some(peer) => { + if peer.their_features.is_none() { + continue; + } + peer }, - } + None => panic!("Inconsistent peers set state!"), + }, + None => { + continue; + }, } } } + } + for event in events_generated.drain(..) { match event { MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", @@ -1243,21 +1387,31 @@ impl PeerManager { log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); - if self.message_handler.route_handler.handle_channel_announcement(&msg).is_ok() && self.message_handler.route_handler.handle_channel_update(&update_msg).is_ok() { - self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None); - self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None); + match self.message_handler.route_handler.handle_channel_announcement(&msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None), + _ => {}, + } + match self.message_handler.route_handler.handle_channel_update(&update_msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None), + _ => {}, } }, MessageSendEvent::BroadcastNodeAnnouncement { msg } => { log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler"); - if self.message_handler.route_handler.handle_node_announcement(&msg).is_ok() { - self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None); + match self.message_handler.route_handler.handle_node_announcement(&msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. 
}) => + self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None), + _ => {}, } }, MessageSendEvent::BroadcastChannelUpdate { msg } => { log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id); - if self.message_handler.route_handler.handle_channel_update(&msg).is_ok() { - self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None); + match self.message_handler.route_handler.handle_channel_update(&msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None), + _ => {}, } }, MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { @@ -1266,9 +1420,6 @@ impl PeerManager { - self.message_handler.route_handler.handle_htlc_fail_channel_update(update); - }, MessageSendEvent::HandleError { ref node_id, ref action } => { match *action { msgs::ErrorAction::DisconnectPeer { ref msg } => { @@ -1283,7 +1434,7 @@ impl PeerManager PeerManager { log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); }, + msgs::ErrorAction::IgnoreDuplicateGossip => {}, msgs::ErrorAction::IgnoreError => { log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); }, @@ -1302,6 +1454,12 @@ impl PeerManager { + log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}", + log_pubkey!(node_id), + msg.data); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); + }, } }, MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => { @@ -1311,7 +1469,7 @@ impl PeerManager { - log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", + log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", log_pubkey!(node_id), msg.short_channel_ids.len(), msg.first_blocknum, @@ -1322,6 +1480,10 @@ impl PeerManager PeerManager PeerManager 0 && !peer.received_message_since_timer_tick) + || peer.awaiting_pong_timer_tick_intervals as u64 > + MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peer_count as u64 + { descriptors_needing_disconnect.push(descriptor.clone()); match peer.their_node_id { Some(node_id) => { @@ -1404,30 +1623,25 @@ impl PeerManager { - // This can't actually happen as we should have hit - // is_ready_for_encryption() previously on this same peer. 
- unreachable!(); - }, + None => {}, } return false; } + peer.received_message_since_timer_tick = false; - if !peer.channel_encryptor.is_ready_for_encryption() { - // The peer needs to complete its handshake before we can exchange messages + if peer.awaiting_pong_timer_tick_intervals > 0 { + peer.awaiting_pong_timer_tick_intervals += 1; return true; } + peer.awaiting_pong_timer_tick_intervals = 1; let ping = msgs::Ping { ponglen: 0, byteslen: 64, }; self.enqueue_message(peer, &ping); + self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer); - let mut descriptor_clone = descriptor.clone(); - self.do_attempt_write_data(&mut descriptor_clone, peer); - - peer.awaiting_pong = true; true }); @@ -1438,9 +1652,18 @@ impl PeerManager bool { + match type_id { + msgs::ChannelAnnouncement::TYPE | + msgs::ChannelUpdate::TYPE | + msgs::NodeAnnouncement::TYPE => true, + _ => false + } +} + #[cfg(test)] mod tests { - use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor}; + use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler}; use ln::msgs; use util::events; use util::test_utils; @@ -1499,20 +1722,20 @@ mod tests { cfgs } - fn create_network<'a>(peer_count: usize, cfgs: &'a Vec) -> Vec> { + fn create_network<'a>(peer_count: usize, cfgs: &'a Vec) -> Vec> { let mut peers = Vec::new(); for i in 0..peer_count { let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap(); let ephemeral_bytes = [i as u8; 32]; let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler }; - let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger); + let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {}); peers.push(peer); } peers } - fn establish_connection<'a>(peer_a: &PeerManager, peer_b: &PeerManager) -> (FileDescriptor, FileDescriptor) { + fn establish_connection<'a>(peer_a: &PeerManager, peer_b: &PeerManager) -> (FileDescriptor, FileDescriptor) { let secp_ctx = Secp256k1::new(); let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret); let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; @@ -1586,11 +1809,23 @@ mod tests { // than can fit into a peer's buffer). let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]); - // Make each peer to read the messages that the other peer just wrote to them. - peers[0].process_events(); - peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(); - peers[1].process_events(); - peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(); + // Make each peer to read the messages that the other peer just wrote to them. Note that + // due to the max-messagse-before-ping limits this may take a few iterations to complete. 
+ for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 { + peers[0].process_events(); + let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0); + assert!(!b_read_data.is_empty()); + + peers[1].read_event(&mut fd_b, &b_read_data).unwrap(); + peers[1].process_events(); + + let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0); + assert!(!a_read_data.is_empty()); + peers[0].read_event(&mut fd_a, &a_read_data).unwrap(); + + peers[1].process_events(); + assert_eq!(fd_b.outbound_data.lock().unwrap().len(), 0, "Until B receives data, it shouldn't send more messages"); + } // Check that each peer has received the expected number of channel updates and channel // announcements. @@ -1599,4 +1834,37 @@ mod tests { assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100); assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50); } + + #[test] + fn test_handshake_timeout() { + // Tests that we time out a peer still waiting on handshake completion after a full timer + // tick. + let cfgs = create_peermgr_cfgs(2); + cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release); + cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release); + let peers = create_network(2, &cfgs); + + let secp_ctx = Secp256k1::new(); + let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret); + let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; + let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; + let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone()).unwrap(); + peers[0].new_inbound_connection(fd_a.clone()).unwrap(); + + // If we get a single timer tick before completion, that's fine + assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1); + peers[0].timer_tick_occurred(); + assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1); + + assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false); + peers[0].process_events(); + assert_eq!(peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); + peers[1].process_events(); + + // ...but if we get a second timer tick, we should disconnect the peer + peers[0].timer_tick_occurred(); + assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0); + + assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err()); + } }
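The `CustomMessageHandler` and `wire::CustomMessageReader` traits added at the top of this patch are what let users feed their own odd-typed messages through `PeerManager`. As a rough sketch only, written as though it sat next to `IgnoringMessageHandler` in this file (so the file's existing imports are in scope), a user-defined handler could look like the following; the `HeartbeatMessage` type, its type id 0xff01, and the `HeartbeatHandler` struct are hypothetical, and only the trait method signatures are taken from this patch:

	// Hypothetical empty custom message with an odd type id, so peers which do
	// not understand it will simply ignore it.
	#[derive(Debug)]
	pub struct HeartbeatMessage {}

	impl wire::Type for HeartbeatMessage {
		fn type_id(&self) -> u16 { 0xff01 }
	}
	impl Writeable for HeartbeatMessage {
		fn write<W: Writer>(&self, _w: &mut W) -> Result<(), io::Error> { Ok(()) }
	}

	// Hypothetical handler which echoes every heartbeat back to its sender.
	pub struct HeartbeatHandler {
		pending: Mutex<Vec<(PublicKey, HeartbeatMessage)>>,
	}

	impl wire::CustomMessageReader for HeartbeatHandler {
		type CustomMessage = HeartbeatMessage;
		fn read<R: io::Read>(&self, message_type: u16, _buffer: &mut R) -> Result<Option<HeartbeatMessage>, msgs::DecodeError> {
			if message_type == 0xff01 { Ok(Some(HeartbeatMessage {})) } else { Ok(None) }
		}
	}

	impl CustomMessageHandler for HeartbeatHandler {
		fn handle_custom_message(&self, _msg: HeartbeatMessage, sender_node_id: &PublicKey) -> Result<(), LightningError> {
			// Queue a reply; PeerManager drains this queue via
			// get_and_clear_pending_msg() and delivers each message to its
			// recipient if a connection to that node exists.
			self.pending.lock().unwrap().push((*sender_node_id, HeartbeatMessage {}));
			Ok(())
		}
		fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, HeartbeatMessage)> {
			self.pending.lock().unwrap().split_off(0)
		}
	}

A handler like this is passed as the final argument of the new five-argument `PeerManager::new`, exactly as the tests above pass `IgnoringMessageHandler {}`.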
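Taken together, the buffer constants introduced above implement a simple backpressure rule: gossip broadcasts to a peer are skipped once its outbound buffer is deeper than OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP messages, or once more than BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO messages have been sent to it since its last pong. A self-contained sketch of that predicate, with the constants mirroring the values in this patch (the should_drop_gossip helper itself is not part of the patch):

	const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
	const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
	const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize =
		OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
	const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;

	// Returns true if a gossip broadcast to this peer should be skipped.
	fn should_drop_gossip(pending_outbound_msgs: usize, msgs_sent_since_pong: usize) -> bool {
		pending_outbound_msgs > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
			|| msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
	}

	fn main() {
		assert!(!should_drop_gossip(5, 10));
		assert!(should_drop_gossip(21, 10)); // outbound buffer past the drop limit of 20
		assert!(should_drop_gossip(5, 65));  // more than 64 messages sent since the last pong
	}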
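The reworked timer_tick_occurred replaces the old boolean awaiting_pong with a tick counter plus a received-anything flag: a peer with an outstanding ping that sent nothing at all during the last tick is disconnected immediately, while a peer that keeps sending data is allowed up to MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER ticks per connected peer to work through its backlog and answer. A standalone sketch of that decision, using a hypothetical PeerTimerState mirror of the relevant Peer fields:

	const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4;

	struct PeerTimerState {
		awaiting_pong_timer_tick_intervals: i8,
		received_message_since_timer_tick: bool,
	}

	// Returns true if this peer should be disconnected on the current timer tick.
	fn should_disconnect_on_tick(peer: &PeerTimerState, peer_count: usize) -> bool {
		(peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
			|| peer.awaiting_pong_timer_tick_intervals as u64
				> MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peer_count as u64
	}

	fn main() {
		// Awaiting a pong and silent for a full tick: disconnect.
		assert!(should_disconnect_on_tick(
			&PeerTimerState { awaiting_pong_timer_tick_intervals: 1, received_message_since_timer_tick: false }, 1));
		// Still sending while it drains its buffer: keep it around.
		assert!(!should_disconnect_on_tick(
			&PeerTimerState { awaiting_pong_timer_tick_intervals: 3, received_message_since_timer_tick: true }, 1));
		// With a single connected peer, a fifth tick without a pong is too many.
		assert!(should_disconnect_on_tick(
			&PeerTimerState { awaiting_pong_timer_tick_intervals: 5, received_message_since_timer_tick: true }, 1));
	}

With the default ten-second tick interval this yields the roughly 40 seconds per connected peer described in the constant's documentation.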
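The ephemeral-key derivation also changes shape: the split peer_counter_low/peer_counter_high pair is replaced by a single AtomicCounter, and each connection's key becomes SHA-256 over the seeded midstate followed by the counter's little-endian bytes. A sketch of the same scheme outside the crate, with std's AtomicU64 standing in for the internal AtomicCounter and assuming the same bitcoin/secp256k1 crate APIs this file already calls (Sha256::engine, HashEngine::input, into_inner, SecretKey::from_slice):

	use std::sync::atomic::{AtomicU64, Ordering};

	use bitcoin::hashes::sha256::Hash as Sha256;
	use bitcoin::hashes::{Hash, HashEngine};
	use bitcoin::secp256k1::SecretKey;

	struct EphemeralKeySource {
		// SHA-256 state pre-seeded with the node's ephemeral_random_data.
		midstate: bitcoin::hashes::sha256::HashEngine,
		counter: AtomicU64,
	}

	impl EphemeralKeySource {
		fn new(ephemeral_random_data: &[u8; 32]) -> Self {
			let mut midstate = Sha256::engine();
			midstate.input(ephemeral_random_data);
			Self { midstate, counter: AtomicU64::new(0) }
		}

		fn get_ephemeral_key(&self) -> SecretKey {
			let mut ephemeral_hash = self.midstate.clone();
			// Every call hashes a fresh counter value, so each connection
			// gets a distinct, unpredictable ephemeral key.
			let counter = self.counter.fetch_add(1, Ordering::AcqRel);
			ephemeral_hash.input(&counter.to_le_bytes());
			SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner())
				.expect("You broke SHA-256!")
		}
	}

	fn main() {
		let source = EphemeralKeySource::new(&[0x42; 32]);
		let _k1 = source.get_ephemeral_key();
		let _k2 = source.get_ephemeral_key();
	}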
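Peer-supplied warning strings are only logged verbatim above when every byte is printable ASCII (32 through 126). The same guard as a tiny standalone helper, with a made-up function name:

	// Returns true only if the untrusted string is safe to print to logs as-is.
	fn is_printable_ascii(data: &str) -> bool {
		data.bytes().all(|b| b >= 32 && b <= 126)
	}

	fn main() {
		assert!(is_printable_ascii("channel fee too low"));
		assert!(!is_printable_ascii("bad\u{0}bytes"));
	}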