diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 3c6e6b19..c09df175 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -19,24 +19,24 @@ use bitcoin::secp256k1::key::{SecretKey,PublicKey};
 use ln::features::InitFeatures;
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LightningError, RoutingMessageHandler};
+use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, RoutingMessageHandler};
 use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use util::ser::{VecWriter, Writeable, Writer};
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
 use ln::wire;
-use util::byte_utils;
+use ln::wire::Encode;
+use util::atomic_counter::AtomicCounter;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::logger::Logger;
-use routing::network_graph::NetGraphMsgHandler;
+use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
 
 use prelude::*;
 use io;
 use alloc::collections::LinkedList;
-use alloc::fmt::Debug;
 use sync::{Arc, Mutex};
-use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
+use core::convert::Infallible;
 #[cfg(feature = "std")] use std::error;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -66,11 +66,10 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
 	fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 	fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 	fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
-	fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
 	fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
 		Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
 	fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
-	fn sync_routing_table(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
+	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
 	fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
 	fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
 	fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -81,28 +80,28 @@ impl Deref for IgnoringMessageHandler {
 	fn deref(&self) -> &Self { self }
 }
 
-impl wire::Type for () {
+// Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a
+// method that takes self for it.
+impl wire::Type for Infallible { fn type_id(&self) -> u16 { - // We should never call this for `DummyCustomType` unreachable!(); } } - -impl Writeable for () { +impl Writeable for Infallible { fn write(&self, _: &mut W) -> Result<(), io::Error> { unreachable!(); } } impl wire::CustomMessageReader for IgnoringMessageHandler { - type CustomMessage = (); + type CustomMessage = Infallible; fn read(&self, _message_type: u16, _buffer: &mut R) -> Result, msgs::DecodeError> { Ok(None) } } impl CustomMessageHandler for IgnoringMessageHandler { - fn handle_custom_message(&self, _msg: Self::CustomMessage, _sender_node_id: &PublicKey) -> Result<(), LightningError> { + fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: &PublicKey) -> Result<(), LightningError> { // Since we always return `None` in the read the handle method should never be called. unreachable!(); } @@ -286,6 +285,10 @@ enum InitSyncTracker{ NodesSyncing(PublicKey), } +/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop +/// forwarding gossip messages to peers altogether. +const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2; + /// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until /// we have fewer than this many messages in the outbound buffer again. /// We also use this as the target number of outbound gossip messages to keep in the write buffer, @@ -293,12 +296,35 @@ enum InitSyncTracker{ const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10; /// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to /// the peer. -const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = 20; +const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO; + +/// If we've sent a ping, and are still awaiting a response, we may need to churn our way through +/// the socket receive buffer before receiving the ping. +/// +/// On a fairly old Arm64 board, with Linux defaults, this can take as long as 20 seconds, not +/// including any network delays, outbound traffic, or the same for messages from other peers. +/// +/// Thus, to avoid needlessly disconnecting a peer, we allow a peer to take this many timer ticks +/// per connected peer to respond to a ping, as long as they send us at least one message during +/// each tick, ensuring we aren't actually just disconnected. +/// With a timer tick interval of ten seconds, this translates to about 40 seconds per connected +/// peer. +/// +/// When we improve parallelism somewhat we should reduce this to e.g. this many timer ticks per +/// two connected peers, assuming most LDK-running systems have at least two cores. +const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4; + +/// This is the minimum number of messages we expect a peer to be able to handle within one timer +/// tick. Once we have sent this many messages since the last ping, we send a ping right away to +/// ensures we don't just fill up our send buffer and leave the peer with too many messages to +/// process before the next ping. 
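As a rough worked example of the budget this sets up (an illustration only, assuming the recommended ~10 second `timer_tick_occurred` call rate; `approx_pong_deadline_secs` is not part of the API):

	// Sketch: the pong deadline scales with the number of connected peers, since a busy
	// peer may legitimately be stuck waiting behind our own outbound backlog.
	fn approx_pong_deadline_secs(connected_peers: u64) -> u64 {
		const TIMER_TICK_SECS: u64 = 10; // assumed call rate, not enforced by the library
		const MAX_TICK_INTERVALS_PER_PEER: u64 = 4; // mirrors MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER above
		MAX_TICK_INTERVALS_PER_PEER * TIMER_TICK_SECS * connected_peers
	}
	// With a single peer this is ~40 seconds, matching the doc comment above. The
	// BUFFER_DRAIN_MSGS_PER_TICK constant defined just below then bounds how much we send
	// between pings, so the peer has a realistic chance of draining its receive buffer
	// within that window.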
+const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32; struct Peer { channel_encryptor: PeerChannelEncryptor, their_node_id: Option, their_features: Option, + their_net_address: Option, pending_outbound_buffer: LinkedList>, pending_outbound_buffer_first_msg_offset: usize, @@ -310,7 +336,9 @@ struct Peer { sync_status: InitSyncTracker, - awaiting_pong: bool, + msgs_sent_since_pong: usize, + awaiting_pong_timer_tick_intervals: i8, + received_message_since_timer_tick: bool, } impl Peer { @@ -344,18 +372,14 @@ struct PeerHolder { node_id_to_descriptor: HashMap, } -#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] -fn _check_usize_is_32_or_64() { - // See below, less than 32 bit pointers may be unsafe here! - unsafe { mem::transmute::<*const usize, [u8; 4]>(panic!()); } -} - /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g. /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case /// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents /// issues such as overly long function definitions. -pub type SimpleArcPeerManager = PeerManager>, Arc, Arc>>, Arc, Arc>; +/// +/// (C-not exported) as Arcs don't make sense in bindings +pub type SimpleArcPeerManager = PeerManager>, Arc, Arc, Arc>>, Arc, Arc>; /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference /// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't @@ -363,7 +387,9 @@ pub type SimpleArcPeerManager = PeerManager = PeerManager, &'e NetGraphMsgHandler<&'g C, &'f L>, &'f L, IgnoringMessageHandler>; +/// +/// (C-not exported) as Arcs don't make sense in bindings +pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager, &'e NetGraphMsgHandler<&'g NetworkGraph, &'h C, &'f L>, &'f L, IgnoringMessageHandler>; /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls /// socket events into messages which it passes on to its [`MessageHandler`]. @@ -395,10 +421,7 @@ pub struct PeerManager PeerManager" for an optional pubkey. +/// This works around `format!()` taking a reference to each argument, preventing +/// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling +/// due to lifetime errors. 
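A usage sketch for the helper declared just below, matching the error-logging call sites later in this patch:

	// Interpolates " from <pubkey>" only when the peer's node id is already known:
	// log_debug!(self.logger, "Error handling message{}; ignoring: {}",
	//	OptionalFromDebugger(&peer.their_node_id), e.err);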
+struct OptionalFromDebugger<'a>(&'a Option); +impl core::fmt::Display for OptionalFromDebugger<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + if let Some(node_id) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) } + } +} + +/// A function used to filter out local or private addresses +/// https://www.iana.org./assignments/ipv4-address-space/ipv4-address-space.xhtml +/// https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +fn filter_addresses(ip_address: Option) -> Option { + match ip_address{ + // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8) + Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None, + // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8) + Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None, + // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10) + Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None, + // For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8) + Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None, + // For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16) + Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None, + // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12) + Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None, + // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16) + Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None, + // For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24) + Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None, + // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3) + Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address, + // For remaining addresses + Some(NetAddress::IPv6{addr: _, port: _}) => None, + Some(..) => ip_address, + None => None, + } +} + impl PeerManager where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, L::Target: Logger, - CMH::Target: CustomMessageHandler + wire::CustomMessageReader { + CMH::Target: CustomMessageHandler { /// Constructs a new PeerManager with the given message handlers and node_id secret key /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be /// cryptographically secure random bytes. @@ -486,8 +550,7 @@ impl P }), our_node_secret, ephemeral_key_midstate, - peer_counter_low: AtomicUsize::new(0), - peer_counter_high: AtomicUsize::new(0), + peer_counter: AtomicCounter::new(), logger, custom_message_handler, } @@ -510,18 +573,18 @@ impl P fn get_ephemeral_key(&self) -> SecretKey { let mut ephemeral_hash = self.ephemeral_key_midstate.clone(); - let low = self.peer_counter_low.fetch_add(1, Ordering::AcqRel); - let high = if low == 0 { - self.peer_counter_high.fetch_add(1, Ordering::AcqRel) - } else { - self.peer_counter_high.load(Ordering::Acquire) - }; - ephemeral_hash.input(&byte_utils::le64_to_array(low as u64)); - ephemeral_hash.input(&byte_utils::le64_to_array(high as u64)); + let counter = self.peer_counter.get_increment(); + ephemeral_hash.input(&counter.to_le_bytes()); SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!") } - /// Indicates a new outbound connection has been established to a node with the given node_id. + /// Indicates a new outbound connection has been established to a node with the given node_id + /// and an optional remote network address. 
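The `get_ephemeral_key` change above folds a single `util::atomic_counter::AtomicCounter` into the hash midstate instead of the old low/high `AtomicUsize` pair. Only `new()` and `get_increment()` are used by this file; the layout below is an assumed sketch, not the crate's actual implementation:

	use core::sync::atomic::{AtomicU64, Ordering};

	pub struct AtomicCounter(AtomicU64);

	impl AtomicCounter {
		pub fn new() -> Self { AtomicCounter(AtomicU64::new(0)) }
		// Returns the pre-increment value, so every connection mixes a unique counter
		// value into its ephemeral-key hash.
		pub fn get_increment(&self) -> u64 { self.0.fetch_add(1, Ordering::AcqRel) }
	}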
+ /// + /// The remote network address adds the option to report a remote IP address back to a connecting + /// peer using the init message. + /// The user should pass the remote network address of the host they are connected to. + /// /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new /// descriptor but must disconnect the connection immediately. /// @@ -531,7 +594,7 @@ impl P /// [`socket_disconnected()`]. /// /// [`socket_disconnected()`]: PeerManager::socket_disconnected - pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result, PeerHandleError> { + pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option) -> Result, PeerHandleError> { let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key()); let res = peer_encryptor.get_act_one().to_vec(); let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes @@ -541,6 +604,7 @@ impl P channel_encryptor: peer_encryptor, their_node_id: None, their_features: None, + their_net_address: remote_network_address, pending_outbound_buffer: LinkedList::new(), pending_outbound_buffer_first_msg_offset: 0, @@ -552,14 +616,21 @@ impl P sync_status: InitSyncTracker::NoSyncRequested, - awaiting_pong: false, + msgs_sent_since_pong: 0, + awaiting_pong_timer_tick_intervals: 0, + received_message_since_timer_tick: false, }).is_some() { panic!("PeerManager driver duplicated descriptors!"); }; Ok(res) } - /// Indicates a new inbound connection has been established. + /// Indicates a new inbound connection has been established to a node with an optional remote + /// network address. + /// + /// The remote network address adds the option to report a remote IP address back to a connecting + /// peer using the init message. + /// The user should pass the remote network address of the host they are connected to. /// /// May refuse the connection by returning an Err, but will never write bytes to the remote end /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT @@ -570,7 +641,7 @@ impl P /// [`socket_disconnected()`]. 
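A hedged sketch of how a networking layer might drive the two constructors in this hunk, passing the observed remote address so it can be echoed back in our `init` message (`peer_manager`, `descriptor`, `inbound_descriptor` and `socket_addr` are placeholders; `socket_addr` is assumed to be a `std::net::SocketAddrV4`):

	// Outbound: we speak first, so send the returned noise act-one bytes immediately.
	let addr = Some(NetAddress::IPv4 { addr: socket_addr.ip().octets(), port: socket_addr.port() });
	let act_one = peer_manager.new_outbound_connection(their_node_id, descriptor.clone(), addr.clone())?;
	descriptor.send_data(&act_one, true);

	// Inbound: the remote end speaks first, so we only register the connection here.
	peer_manager.new_inbound_connection(inbound_descriptor, addr)?;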
/// /// [`socket_disconnected()`]: PeerManager::socket_disconnected - pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> { + pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option) -> Result<(), PeerHandleError> { let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret); let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes @@ -579,6 +650,7 @@ impl P channel_encryptor: peer_encryptor, their_node_id: None, their_features: None, + their_net_address: remote_network_address, pending_outbound_buffer: LinkedList::new(), pending_outbound_buffer_first_msg_offset: 0, @@ -590,7 +662,9 @@ impl P sync_status: InitSyncTracker::NoSyncRequested, - awaiting_pong: false, + msgs_sent_since_pong: 0, + awaiting_pong_timer_tick_intervals: 0, + received_message_since_timer_tick: false, }).is_some() { panic!("PeerManager driver duplicated descriptors!"); }; @@ -599,7 +673,7 @@ impl P fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) { while !peer.awaiting_write_event { - if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE { + if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE && peer.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK { match peer.sync_status { InitSyncTracker::NoSyncRequested => {}, InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => { @@ -644,6 +718,9 @@ impl P }, } } + if peer.msgs_sent_since_pong >= BUFFER_DRAIN_MSGS_PER_TICK { + self.maybe_send_extra_ping(peer); + } if { let next_buff = match peer.pending_outbound_buffer.front() { @@ -719,14 +796,23 @@ impl P } } - /// Append a message to a peer's pending outbound/write buffer, and update the map of peers needing sends accordingly. 
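A condensed restatement of the pacing added to `do_attempt_write_data` above (illustrative, not new code):

	// The initial-sync backlog is only drained while both budgets hold; once
	// BUFFER_DRAIN_MSGS_PER_TICK messages have gone out since the last pong,
	// maybe_send_extra_ping() queues a ping so the budget resets as soon as the peer answers.
	let may_refill_from_sync_backlog =
		peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE // fewer than 10 queued
		&& peer.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK; // fewer than 32 since the last pong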
- fn enqueue_message(&self, peer: &mut Peer, message: &M) { - let mut buffer = VecWriter(Vec::new()); + /// Append a message to a peer's pending outbound/write buffer + fn enqueue_encoded_message(&self, peer: &mut Peer, encoded_message: &Vec) { + peer.msgs_sent_since_pong += 1; + peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..])); + } + + /// Append a message to a peer's pending outbound/write buffer + fn enqueue_message(&self, peer: &mut Peer, message: &M) { + let mut buffer = VecWriter(Vec::with_capacity(2048)); wire::write(message, &mut buffer).unwrap(); // crash if the write failed - let encoded_message = buffer.0; - log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..])); + if is_gossip_msg(message.type_id()) { + log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())); + } else { + log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())) + } + self.enqueue_encoded_message(peer, &buffer.0); } fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result { @@ -766,19 +852,25 @@ impl P match e.action { msgs::ErrorAction::DisconnectPeer { msg: _ } => { //TODO: Try to push msg - log_debug!(self.logger, "Error handling message; disconnecting peer with: {}", e.err); + log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer.their_node_id), e.err); return Err(PeerHandleError{ no_connection_possible: false }); }, msgs::ErrorAction::IgnoreAndLog(level) => { - log_given_level!(self.logger, level, "Error handling message; ignoring: {}", e.err); + log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err); continue }, + msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these msgs::ErrorAction::IgnoreError => { - log_debug!(self.logger, "Error handling message; ignoring: {}", e.err); + log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err); continue; }, msgs::ErrorAction::SendErrorMessage { msg } => { - log_debug!(self.logger, "Error handling message; sending error message with: {}", e.err); + log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err); + self.enqueue_message(peer, &msg); + continue; + }, + msgs::ErrorAction::SendWarningMessage { msg, log_level } => { + log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err); self.enqueue_message(peer, &msg); continue; }, @@ -820,8 +912,9 @@ impl P peer.their_node_id = Some(their_node_id); insert_node_id!(); let features = InitFeatures::known(); - let resp = msgs::Init { features }; + let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone())}; self.enqueue_message(peer, &resp); + peer.awaiting_pong_timer_tick_intervals = 0; }, NextNoiseStep::ActThree => { let their_node_id = try_potential_handleerror!(peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..])); @@ -830,8 +923,9 @@ impl P peer.their_node_id = Some(their_node_id); insert_node_id!(); let features = InitFeatures::known(); - 
let resp = msgs::Init { features }; + let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone())}; self.enqueue_message(peer, &resp); + peer.awaiting_pong_timer_tick_intervals = 0; }, NextNoiseStep::NoiseComplete => { if peer.pending_read_is_header { @@ -856,25 +950,40 @@ impl P Ok(x) => x, Err(e) => { match e { - msgs::DecodeError::UnknownVersion => return Err(PeerHandleError { no_connection_possible: false }), - msgs::DecodeError::UnknownRequiredFeature => { - log_trace!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!"); + // Note that to avoid recursion we never call + // `do_attempt_write_data` from here, causing + // the messages enqueued here to not actually + // be sent before the peer is disconnected. + (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => { + log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!"); + continue; + } + (msgs::DecodeError::UnsupportedCompression, _) => { + log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() }); + continue; + } + (_, Some(ty)) if is_gossip_msg(ty) => { + log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unreadable/bogus gossip message".to_owned() }); continue; } - msgs::DecodeError::InvalidValue => { + (msgs::DecodeError::UnknownRequiredFeature, ty) => { + log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) }); + return Err(PeerHandleError { no_connection_possible: false }); + } + (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }), + (msgs::DecodeError::InvalidValue, _) => { log_debug!(self.logger, "Got an invalid value while deserializing message"); return Err(PeerHandleError { no_connection_possible: false }); } - msgs::DecodeError::ShortRead => { + (msgs::DecodeError::ShortRead, _) => { log_debug!(self.logger, "Deserialization failed due to shortness of message"); return Err(PeerHandleError { no_connection_possible: false }); } - msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError { no_connection_possible: false }), - msgs::DecodeError::Io(_) => return Err(PeerHandleError { no_connection_possible: false }), - msgs::DecodeError::UnsupportedCompression => { - log_trace!(self.logger, "We don't support zlib-compressed message fields, ignoring message"); - continue; - } + (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }), + (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }), } } }; @@ -919,7 +1028,13 @@ impl P peer: &mut Peer, message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage> ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> { - log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap())); + if is_gossip_msg(message.type_id()) { + 
log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap())); + } else { + log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap())); + } + + peer.received_message_since_timer_tick = true; // Need an Init as first message if let wire::Message::Init(_) = message { @@ -941,7 +1056,7 @@ impl P return Err(PeerHandleError{ no_connection_possible: false }.into()); } - log_info!(self.logger, "Received peer Init message: {}", msg.features); + log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.features); if msg.features.initial_routing_sync() { peer.sync_status = InitSyncTracker::ChannelsSyncing(0); @@ -951,7 +1066,7 @@ impl P return Err(PeerHandleError{ no_connection_possible: true }.into()); } - self.message_handler.route_handler.sync_routing_table(&peer.their_node_id.unwrap(), &msg); + self.message_handler.route_handler.peer_connected(&peer.their_node_id.unwrap(), &msg); self.message_handler.chan_handler.peer_connected(&peer.their_node_id.unwrap(), &msg); peer.their_features = Some(msg.features); @@ -975,6 +1090,21 @@ impl P return Err(PeerHandleError{ no_connection_possible: true }.into()); } }, + wire::Message::Warning(msg) => { + let mut data_is_printable = true; + for b in msg.data.bytes() { + if b < 32 || b > 126 { + data_is_printable = false; + break; + } + } + + if data_is_printable { + log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data); + } else { + log_debug!(self.logger, "Got warning message from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap())); + } + }, wire::Message::Ping(msg) => { if msg.ponglen < 65532 { @@ -983,7 +1113,8 @@ impl P } }, wire::Message::Pong(_msg) => { - peer.awaiting_pong = false; + peer.awaiting_pong_timer_tick_intervals = 0; + peer.msgs_sent_since_pong = 0; }, // Channel messages: @@ -1096,7 +1227,7 @@ impl P fn forward_broadcast_msg(&self, peers: &mut PeerHolder, msg: &wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) { match msg { wire::Message::ChannelAnnouncement(ref msg) => { - log_trace!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); + log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1104,8 +1235,10 @@ impl P !peer.should_forward_channel_announcement(msg.contents.short_channel_id) { continue } - if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { - log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + { + log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) || @@ -1115,11 +1248,11 @@ impl P if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..])); + self.enqueue_encoded_message(peer, &encoded_msg); } }, 
wire::Message::NodeAnnouncement(ref msg) => { - log_trace!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); + log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1127,8 +1260,10 @@ impl P !peer.should_forward_node_announcement(msg.contents.node_id) { continue } - if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { - log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + { + log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) { @@ -1137,11 +1272,11 @@ impl P if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..])); + self.enqueue_encoded_message(peer, &encoded_msg); } }, wire::Message::ChannelUpdate(ref msg) => { - log_trace!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg); + log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1149,14 +1284,16 @@ impl P !peer.should_forward_channel_announcement(msg.contents.short_channel_id) { continue } - if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { - log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + { + log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..])); + self.enqueue_encoded_message(peer, &encoded_msg); } }, _ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"), @@ -1170,6 +1307,9 @@ impl P /// May call [`send_data`] on [`SocketDescriptor`]s. Thus, be very careful with reentrancy /// issues! /// + /// You don't have to call this function explicitly if you are using [`lightning-net-tokio`] + /// or one of the other clients provided in our language bindings. 
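For users not running `lightning-net-tokio` or one of the bindings' socket handlers, a minimal driver loop might look roughly like this (a sketch only; `stream`, `read_buf` and the surrounding error handling are placeholders):

	loop {
		let len = stream.read(&mut read_buf)?;
		// Ok(true) asks us to pause reading until a later send_data() call sets resume_read.
		let pause_read = peer_manager.read_event(&mut descriptor, &read_buf[..len])?;
		peer_manager.process_events();
		if pause_read { /* wait for the resume signal before reading again */ }
	}
	// Independently, call peer_manager.timer_tick_occurred() roughly every ten seconds.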
+ /// /// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards /// [`send_data`]: SocketDescriptor::send_data @@ -1295,21 +1435,31 @@ impl P }, MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => { log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); - if self.message_handler.route_handler.handle_channel_announcement(&msg).is_ok() && self.message_handler.route_handler.handle_channel_update(&update_msg).is_ok() { - self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None); - self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None); + match self.message_handler.route_handler.handle_channel_announcement(&msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None), + _ => {}, + } + match self.message_handler.route_handler.handle_channel_update(&update_msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None), + _ => {}, } }, MessageSendEvent::BroadcastNodeAnnouncement { msg } => { log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler"); - if self.message_handler.route_handler.handle_node_announcement(&msg).is_ok() { - self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None); + match self.message_handler.route_handler.handle_node_announcement(&msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None), + _ => {}, } }, MessageSendEvent::BroadcastChannelUpdate { msg } => { log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id); - if self.message_handler.route_handler.handle_channel_update(&msg).is_ok() { - self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None); + match self.message_handler.route_handler.handle_channel_update(&msg) { + Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => + self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None), + _ => {}, } }, MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { @@ -1318,9 +1468,6 @@ impl P let peer = get_peer_for_forwarding!(node_id); peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); }, - MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => { - self.message_handler.route_handler.handle_htlc_fail_channel_update(update); - }, MessageSendEvent::HandleError { ref node_id, ref action } => { match *action { msgs::ErrorAction::DisconnectPeer { ref msg } => { @@ -1335,7 +1482,7 @@ impl P // room in the send buffer, put the error message there... 
self.do_attempt_write_data(&mut descriptor, &mut peer); } else { - log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id)); + log_gossip!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id)); } } descriptor.disconnect_socket(); @@ -1345,6 +1492,7 @@ impl P msgs::ErrorAction::IgnoreAndLog(level) => { log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); }, + msgs::ErrorAction::IgnoreDuplicateGossip => {}, msgs::ErrorAction::IgnoreError => { log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); }, @@ -1354,6 +1502,12 @@ impl P msg.data); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, + msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => { + log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}", + log_pubkey!(node_id), + msg.data); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); + }, } }, MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => { @@ -1363,7 +1517,7 @@ impl P self.enqueue_message(get_peer_for_forwarding!(node_id), msg); } MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", + log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", log_pubkey!(node_id), msg.short_channel_ids.len(), msg.first_blocknum, @@ -1371,6 +1525,9 @@ impl P msg.sync_complete); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); } + MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => { + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); + } } } @@ -1432,12 +1589,43 @@ impl P } } + /// Disconnects all currently-connected peers. This is useful on platforms where there may be + /// an indication that TCP sockets have stalled even if we weren't around to time them out + /// using regular ping/pongs. + pub fn disconnect_all_peers(&self) { + let mut peers_lock = self.peers.lock().unwrap(); + let peers = &mut *peers_lock; + for (mut descriptor, peer) in peers.peers.drain() { + if let Some(node_id) = peer.their_node_id { + log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id); + peers.node_id_to_descriptor.remove(&node_id); + self.message_handler.chan_handler.peer_disconnected(&node_id, false); + } + descriptor.disconnect_socket(); + } + debug_assert!(peers.node_id_to_descriptor.is_empty()); + } + + /// This is called when we're blocked on sending additional gossip messages until we receive a + /// pong. If we aren't waiting on a pong, we take this opportunity to send a ping (setting + /// `awaiting_pong_timer_tick_intervals` to a special flag value to indicate this). + fn maybe_send_extra_ping(&self, peer: &mut Peer) { + if peer.awaiting_pong_timer_tick_intervals == 0 { + peer.awaiting_pong_timer_tick_intervals = -1; + let ping = msgs::Ping { + ponglen: 0, + byteslen: 64, + }; + self.enqueue_message(peer, &ping); + } + } + /// Send pings to each peer and disconnect those which did not respond to the last round of /// pings. 
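The bookkeeping behind this uses the new `Peer` fields introduced earlier in the patch; in summary:

	// awaiting_pong_timer_tick_intervals == 0  => not currently waiting on a pong
	//                                    == -1 => an "extra" ping was sent by maybe_send_extra_ping()
	//                                             because the per-pong send budget was used up
	//                                    >  0  => number of timer ticks spent waiting; also reused to
	//                                             count ticks spent waiting for the noise handshake
	// received_message_since_timer_tick        => cleared on every tick; a peer awaiting a pong is only
	//                                             disconnected early if it also sent us nothing at all
	//                                             during the tick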
/// - /// This may be called on any timescale you want, however, roughly once every five to ten - /// seconds is preferred. The call rate determines both how often we send a ping to our peers - /// and how much time they have to respond before we disconnect them. + /// This may be called on any timescale you want, however, roughly once every ten seconds is + /// preferred. The call rate determines both how often we send a ping to our peers and how much + /// time they have to respond before we disconnect them. /// /// May call [`send_data`] on all [`SocketDescriptor`]s. Thus, be very careful with reentrancy /// issues! @@ -1450,9 +1638,35 @@ impl P let node_id_to_descriptor = &mut peers.node_id_to_descriptor; let peers = &mut peers.peers; let mut descriptors_needing_disconnect = Vec::new(); + let peer_count = peers.len(); peers.retain(|descriptor, peer| { - if peer.awaiting_pong { + let mut do_disconnect_peer = false; + if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() { + // The peer needs to complete its handshake before we can exchange messages. We + // give peers one timer tick to complete handshake, reusing + // `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken + // for handshake completion. + if peer.awaiting_pong_timer_tick_intervals != 0 { + do_disconnect_peer = true; + } else { + peer.awaiting_pong_timer_tick_intervals = 1; + return true; + } + } + + if peer.awaiting_pong_timer_tick_intervals == -1 { + // Magic value set in `maybe_send_extra_ping`. + peer.awaiting_pong_timer_tick_intervals = 1; + peer.received_message_since_timer_tick = false; + return true; + } + + if do_disconnect_peer + || (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick) + || peer.awaiting_pong_timer_tick_intervals as u64 > + MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peer_count as u64 + { descriptors_needing_disconnect.push(descriptor.clone()); match peer.their_node_id { Some(node_id) => { @@ -1460,30 +1674,25 @@ impl P node_id_to_descriptor.remove(&node_id); self.message_handler.chan_handler.peer_disconnected(&node_id, false); } - None => { - // This can't actually happen as we should have hit - // is_ready_for_encryption() previously on this same peer. 
- unreachable!(); - }, + None => {}, } return false; } + peer.received_message_since_timer_tick = false; - if !peer.channel_encryptor.is_ready_for_encryption() { - // The peer needs to complete its handshake before we can exchange messages + if peer.awaiting_pong_timer_tick_intervals > 0 { + peer.awaiting_pong_timer_tick_intervals += 1; return true; } + peer.awaiting_pong_timer_tick_intervals = 1; let ping = msgs::Ping { ponglen: 0, byteslen: 64, }; self.enqueue_message(peer, &ping); + self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer); - let mut descriptor_clone = descriptor.clone(); - self.do_attempt_write_data(&mut descriptor_clone, peer); - - peer.awaiting_pong = true; true }); @@ -1494,10 +1703,24 @@ impl P } } +fn is_gossip_msg(type_id: u16) -> bool { + match type_id { + msgs::ChannelAnnouncement::TYPE | + msgs::ChannelUpdate::TYPE | + msgs::NodeAnnouncement::TYPE | + msgs::QueryChannelRange::TYPE | + msgs::ReplyChannelRange::TYPE | + msgs::QueryShortChannelIds::TYPE | + msgs::ReplyShortChannelIdsEnd::TYPE => true, + _ => false + } +} + #[cfg(test)] mod tests { - use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler}; + use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses}; use ln::msgs; + use ln::msgs::NetAddress; use util::events; use util::test_utils; @@ -1573,8 +1796,8 @@ mod tests { let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret); let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; - let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone()).unwrap(); - peer_a.new_inbound_connection(fd_a.clone()).unwrap(); + let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap(); + peer_a.new_inbound_connection(fd_a.clone(), None).unwrap(); assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); peer_a.process_events(); assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); @@ -1642,11 +1865,23 @@ mod tests { // than can fit into a peer's buffer). let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]); - // Make each peer to read the messages that the other peer just wrote to them. - peers[0].process_events(); - peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(); - peers[1].process_events(); - peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(); + // Make each peer to read the messages that the other peer just wrote to them. Note that + // due to the max-messagse-before-ping limits this may take a few iterations to complete. + for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 { + peers[0].process_events(); + let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0); + assert!(!b_read_data.is_empty()); + + peers[1].read_event(&mut fd_b, &b_read_data).unwrap(); + peers[1].process_events(); + + let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0); + assert!(!a_read_data.is_empty()); + peers[0].read_event(&mut fd_a, &a_read_data).unwrap(); + + peers[1].process_events(); + assert_eq!(fd_b.outbound_data.lock().unwrap().len(), 0, "Until B receives data, it shouldn't send more messages"); + } // Check that each peer has received the expected number of channel updates and channel // announcements. 
@@ -1655,4 +1890,133 @@ mod tests { assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100); assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50); } + + #[test] + fn test_handshake_timeout() { + // Tests that we time out a peer still waiting on handshake completion after a full timer + // tick. + let cfgs = create_peermgr_cfgs(2); + cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release); + cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release); + let peers = create_network(2, &cfgs); + + let secp_ctx = Secp256k1::new(); + let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret); + let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; + let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; + let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap(); + peers[0].new_inbound_connection(fd_a.clone(), None).unwrap(); + + // If we get a single timer tick before completion, that's fine + assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1); + peers[0].timer_tick_occurred(); + assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1); + + assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false); + peers[0].process_events(); + assert_eq!(peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); + peers[1].process_events(); + + // ...but if we get a second timer tick, we should disconnect the peer + peers[0].timer_tick_occurred(); + assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0); + + assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err()); + } + + #[test] + fn test_filter_addresses(){ + // Tests the filter_addresses function. 
+ + // For (10/8) + let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (0/8) + let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (100.64/10) + let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (127/8) + let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (169.254/16) + let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (172.16/12) + let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (192.168/16) + let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (192.88.99/24) + let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For other IPv4 addresses + let ip_address = 
NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); + let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); + let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); + + // For (2000::/3) + let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); + let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); + let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); + + // For other IPv6 addresses + let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000}; + assert_eq!(filter_addresses(Some(ip_address.clone())), None); + + // For (None) + assert_eq!(filter_addresses(None), None); + } }