X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fpeer_handler.rs;h=89f3e9ff5ffa48233086af330cb0fdb6ccf58f15;hb=cd4dc39a8c4732bea1a3221617f86e34dfb7efb8;hp=0dbad0a311a1794b5f811259785768936dc2dda4;hpb=ed4a39fe1e7381d4eb8ac9e5b3a178cf047888dc;p=rust-lightning

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 0dbad0a3..89f3e9ff 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -24,16 +24,16 @@ use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use util::ser::{VecWriter, Writeable, Writer};
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
 use ln::wire;
-use util::byte_utils;
+use ln::wire::Encode;
+use util::atomic_counter::AtomicCounter;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::logger::Logger;
-use routing::network_graph::NetGraphMsgHandler;
+use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
 
 use prelude::*;
 use io;
 use alloc::collections::LinkedList;
 use sync::{Arc, Mutex};
-use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
 use core::convert::Infallible;
@@ -371,18 +371,12 @@ struct PeerHolder {
     node_id_to_descriptor: HashMap,
 }
 
-#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
-fn _check_usize_is_32_or_64() {
-    // See below, less than 32 bit pointers may be unsafe here!
-    unsafe { mem::transmute::<*const usize, [u8; 4]>(panic!()); }
-}
-
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
 /// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
 /// issues such as overly long function definitions.
-pub type SimpleArcPeerManager = PeerManager>, Arc, Arc>>, Arc, Arc>;
+pub type SimpleArcPeerManager = PeerManager>, Arc, Arc, Arc>>, Arc, Arc>;
 
 /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
 /// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
@@ -390,7 +384,7 @@ pub type SimpleArcPeerManager = PeerManager
-pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, SD, M, T, F, C, L> = PeerManager, &'e NetGraphMsgHandler<&'g C, &'f L>, &'f L, IgnoringMessageHandler>;
+pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager, &'e NetGraphMsgHandler<&'g NetworkGraph, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;
 
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
 /// socket events into messages which it passes on to its [`MessageHandler`].
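
The two type-alias hunks above reflect that `NetGraphMsgHandler` is now parameterized by how the `NetworkGraph` is referenced (an `Arc` in the static-lifetime alias, a `&'g` borrow in the by-reference alias) rather than the handler carrying the graph itself. A simplified sketch of that shape, using hypothetical names; this is not the exact rust-lightning definition:

use std::ops::Deref;
use std::sync::Arc;

struct NetworkGraph;

// Sketch: the gossip handler can borrow or share the graph through any Deref target.
struct NetGraphMsgHandler<G: Deref<Target = NetworkGraph>> {
    network_graph: G,
}

// Static-lifetime flavour, mirroring the SimpleArcPeerManager alias above:
type ArcGossipHandler = NetGraphMsgHandler<Arc<NetworkGraph>>;
// Borrowed flavour, mirroring the SimpleRefPeerManager alias above:
type RefGossipHandler<'g> = NetGraphMsgHandler<&'g NetworkGraph>;
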
@@ -422,10 +416,7 @@ pub struct PeerManager PeerManager" for an optional pubkey.
+/// This works around `format!()` taking a reference to each argument, preventing
+/// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling
+/// due to lifetime errors.
+struct OptionalFromDebugger<'a>(&'a Option);
+impl core::fmt::Display for OptionalFromDebugger<'_> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+        if let Some(node_id) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) }
+    }
+}
+
 impl PeerManager where
         CM::Target: ChannelMessageHandler,
         RM::Target: RoutingMessageHandler,
@@ -513,8 +515,7 @@ impl P
             }),
             our_node_secret,
             ephemeral_key_midstate,
-            peer_counter_low: AtomicUsize::new(0),
-            peer_counter_high: AtomicUsize::new(0),
+            peer_counter: AtomicCounter::new(),
             logger,
             custom_message_handler,
         }
@@ -537,14 +538,8 @@ impl P
     fn get_ephemeral_key(&self) -> SecretKey {
         let mut ephemeral_hash = self.ephemeral_key_midstate.clone();
-        let low = self.peer_counter_low.fetch_add(1, Ordering::AcqRel);
-        let high = if low == 0 {
-            self.peer_counter_high.fetch_add(1, Ordering::AcqRel)
-        } else {
-            self.peer_counter_high.load(Ordering::Acquire)
-        };
-        ephemeral_hash.input(&byte_utils::le64_to_array(low as u64));
-        ephemeral_hash.input(&byte_utils::le64_to_array(high as u64));
+        let counter = self.peer_counter.get_increment();
+        ephemeral_hash.input(&counter.to_le_bytes());
         SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
     }
 
@@ -763,7 +758,12 @@ impl P
     fn enqueue_message(&self, peer: &mut Peer, message: &M) {
         let mut buffer = VecWriter(Vec::with_capacity(2048));
         wire::write(message, &mut buffer).unwrap(); // crash if the write failed
-        log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+
+        if is_gossip_msg(message.type_id()) {
+            log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+        } else {
+            log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()))
+        }
         self.enqueue_encoded_message(peer, &buffer.0);
     }
 
@@ -804,19 +804,19 @@ impl P
                 match e.action {
                     msgs::ErrorAction::DisconnectPeer { msg: _ } => {
                         //TODO: Try to push msg
-                        log_debug!(self.logger, "Error handling message; disconnecting peer with: {}", e.err);
+                        log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
                         return Err(PeerHandleError{ no_connection_possible: false });
                     },
                     msgs::ErrorAction::IgnoreAndLog(level) => {
-                        log_given_level!(self.logger, level, "Error handling message; ignoring: {}", e.err);
+                        log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
                         continue
                     },
                     msgs::ErrorAction::IgnoreError => {
-                        log_debug!(self.logger, "Error handling message; ignoring: {}", e.err);
+                        log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
                         continue;
                     },
                     msgs::ErrorAction::SendErrorMessage { msg } => {
-                        log_debug!(self.logger, "Error handling message; sending error message with: {}", e.err);
+                        log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
                         self.enqueue_message(peer, &msg);
                         continue;
                     },
@@ -860,6 +860,7 @@ impl P
                         let features = InitFeatures::known();
                         let resp = msgs::Init { features };
                         self.enqueue_message(peer, &resp);
+                        peer.awaiting_pong_timer_tick_intervals = 0;
                     },
                     NextNoiseStep::ActThree => {
                         let their_node_id = try_potential_handleerror!(peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
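
The `get_ephemeral_key` hunk above replaces the hand-rolled low/high counter pair with `util::atomic_counter::AtomicCounter`, whose `get_increment()` hands back a single `u64` that is fed into the SHA-256 midstate as little-endian bytes. A minimal sketch of such a counter, assuming the same two-word layout the removed code used so it also behaves on 32-bit targets; this illustrates the interface used above and is not necessarily the exact in-tree implementation:

use core::sync::atomic::{AtomicUsize, Ordering};

pub struct AtomicCounter {
    // Two words cover a full u64 even when `usize` is only 32 bits wide.
    counter_low: AtomicUsize,
    counter_high: AtomicUsize,
}

impl AtomicCounter {
    pub fn new() -> Self {
        Self { counter_low: AtomicUsize::new(0), counter_high: AtomicUsize::new(0) }
    }
    pub fn get_increment(&self) -> u64 {
        let low = self.counter_low.fetch_add(1, Ordering::AcqRel) as u64;
        let high = if low == 0 {
            // First call, or the low word wrapped; bump the high word. On 64-bit targets
            // the low word never realistically wraps, so the high word stays at zero.
            self.counter_high.fetch_add(1, Ordering::AcqRel) as u64
        } else {
            self.counter_high.load(Ordering::Acquire) as u64
        };
        (high << 32) | low
    }
}
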
@@ -870,6 +871,7 @@ impl P
                         let features = InitFeatures::known();
                         let resp = msgs::Init { features };
                         self.enqueue_message(peer, &resp);
+                        peer.awaiting_pong_timer_tick_intervals = 0;
                     },
                     NextNoiseStep::NoiseComplete => {
                         if peer.pending_read_is_header {
@@ -896,7 +898,7 @@ impl P
                         match e {
                             msgs::DecodeError::UnknownVersion => return Err(PeerHandleError { no_connection_possible: false }),
                             msgs::DecodeError::UnknownRequiredFeature => {
-                                log_trace!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!");
+                                log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
                                 continue;
                             }
                             msgs::DecodeError::InvalidValue => {
@@ -910,7 +912,7 @@ impl P
                             msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError { no_connection_possible: false }),
                             msgs::DecodeError::Io(_) => return Err(PeerHandleError { no_connection_possible: false }),
                             msgs::DecodeError::UnsupportedCompression => {
-                                log_trace!(self.logger, "We don't support zlib-compressed message fields, ignoring message");
+                                log_gossip!(self.logger, "We don't support zlib-compressed message fields, ignoring message");
                                 continue;
                             }
                         }
@@ -957,7 +959,12 @@ impl P
         peer: &mut Peer,
         message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>
     ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
-        log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+        if is_gossip_msg(message.type_id()) {
+            log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+        } else {
+            log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+        }
+
         peer.received_message_since_timer_tick = true;
 
         // Need an Init as first message
@@ -980,7 +987,7 @@ impl P
             return Err(PeerHandleError{ no_connection_possible: false }.into());
         }
 
-        log_info!(self.logger, "Received peer Init message: {}", msg.features);
+        log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.features);
 
         if msg.features.initial_routing_sync() {
             peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
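
Both the `enqueue_message` and `handle_message` hunks route gossip through `log_gossip!` by checking the wire type id with `is_gossip_msg`, which is defined near the bottom of this diff and leans on the newly imported `ln::wire::Encode` trait exposing each message's type number as an associated constant. A simplified, self-contained illustration of that shape; the trait and structs are stand-ins, but the three constants are the BOLT #7 gossip message type ids:

pub trait Encode {
    const TYPE: u16;
}

pub struct ChannelAnnouncement;
pub struct NodeAnnouncement;
pub struct ChannelUpdate;

impl Encode for ChannelAnnouncement { const TYPE: u16 = 256; }
impl Encode for NodeAnnouncement { const TYPE: u16 = 257; }
impl Encode for ChannelUpdate { const TYPE: u16 = 258; }

fn is_gossip_msg(type_id: u16) -> bool {
    match type_id {
        ChannelAnnouncement::TYPE | NodeAnnouncement::TYPE | ChannelUpdate::TYPE => true,
        _ => false,
    }
}
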
@@ -1136,7 +1143,7 @@ impl P
     fn forward_broadcast_msg(&self, peers: &mut PeerHolder, msg: &wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
         match msg {
             wire::Message::ChannelAnnouncement(ref msg) => {
-                log_trace!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
+                log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
                 let encoded_msg = encode_msg!(msg);
 
                 for (_, peer) in peers.peers.iter_mut() {
@@ -1147,7 +1154,7 @@ impl P
                     if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
                         || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
                     {
-                        log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                        log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                         continue;
                     }
                     if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) ||
@@ -1161,7 +1168,7 @@ impl P
                 }
             },
             wire::Message::NodeAnnouncement(ref msg) => {
-                log_trace!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
+                log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
                 let encoded_msg = encode_msg!(msg);
 
                 for (_, peer) in peers.peers.iter_mut() {
@@ -1172,7 +1179,7 @@ impl P
                     if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
                         || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
                     {
-                        log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                        log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                         continue;
                     }
                     if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) {
@@ -1185,7 +1192,7 @@ impl P
                 }
             },
             wire::Message::ChannelUpdate(ref msg) => {
-                log_trace!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
+                log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
                 let encoded_msg = encode_msg!(msg);
 
                 for (_, peer) in peers.peers.iter_mut() {
@@ -1196,7 +1203,7 @@ impl P
                     if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
                         || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
                     {
-                        log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                        log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                         continue;
                     }
                     if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
@@ -1381,7 +1388,7 @@ impl P
                             // room in the send buffer, put the error message there...
                             self.do_attempt_write_data(&mut descriptor, &mut peer);
                         } else {
-                            log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
+                            log_gossip!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
                        }
                     }
                     descriptor.disconnect_socket();
@@ -1409,7 +1416,7 @@ impl P
                     self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
                 }
                 MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
-                    log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+                    log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
                         log_pubkey!(node_id),
                         msg.short_channel_ids.len(),
                         msg.first_blocknum,
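
The broadcast and event-handling hunks above move very chatty per-message logging from `log_trace!` to the dedicated `log_gossip!` macro, so that gossip forwarding noise can be filtered independently of the rest of the trace output. A self-contained sketch of the idea behind a lowest-severity gossip level; the names echo rust-lightning's logger, but the definitions here are illustrative only and assume the gossip level sits below Trace:

#[derive(PartialEq, PartialOrd)]
enum Level { Gossip, Trace, Debug, Info, Warn, Error }

struct ThresholdLogger { min_level: Level }

impl ThresholdLogger {
    fn log(&self, level: Level, msg: &str) {
        // Drop anything below the configured threshold, e.g. all gossip chatter.
        if level >= self.min_level {
            println!("{}", msg);
        }
    }
}

fn main() {
    let logger = ThresholdLogger { min_level: Level::Trace };
    logger.log(Level::Gossip, "Enqueueing message ... (filtered out)");
    logger.log(Level::Debug, "Error handling message; ignoring: ...");
}
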
@@ -1530,12 +1537,29 @@ impl P
         let peer_count = peers.len();
         peers.retain(|descriptor, peer| {
-            if !peer.channel_encryptor.is_ready_for_encryption() {
-                // The peer needs to complete its handshake before we can exchange messages
+            let mut do_disconnect_peer = false;
+            if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
+                // The peer needs to complete its handshake before we can exchange messages. We
+                // give peers one timer tick to complete handshake, reusing
+                // `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken
+                // for handshake completion.
+                if peer.awaiting_pong_timer_tick_intervals != 0 {
+                    do_disconnect_peer = true;
+                } else {
+                    peer.awaiting_pong_timer_tick_intervals = 1;
+                    return true;
+                }
+            }
+
+            if peer.awaiting_pong_timer_tick_intervals == -1 {
+                // Magic value set in `maybe_send_extra_ping`.
+                peer.awaiting_pong_timer_tick_intervals = 1;
+                peer.received_message_since_timer_tick = false;
                 return true;
             }
 
-            if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
+            if do_disconnect_peer
+                || (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
                 || peer.awaiting_pong_timer_tick_intervals as u64 > MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peer_count as u64
             {
@@ -1546,21 +1570,11 @@ impl P
                         node_id_to_descriptor.remove(&node_id);
                         self.message_handler.chan_handler.peer_disconnected(&node_id, false);
                     }
-                    None => {
-                        // This can't actually happen as we should have hit
-                        // is_ready_for_encryption() previously on this same peer.
-                        unreachable!();
-                    },
+                    None => {},
                 }
                 return false;
             }
-            peer.received_message_since_timer_tick = false;
-            if peer.awaiting_pong_timer_tick_intervals == -1 {
-                // Magic value set in `maybe_send_extra_ping`.
-                peer.awaiting_pong_timer_tick_intervals = 1;
-                return true;
-            }
 
             if peer.awaiting_pong_timer_tick_intervals > 0 {
                 peer.awaiting_pong_timer_tick_intervals += 1;
@@ -1585,6 +1599,15 @@ impl P
     }
 }
 
+fn is_gossip_msg(type_id: u16) -> bool {
+    match type_id {
+        msgs::ChannelAnnouncement::TYPE |
+        msgs::ChannelUpdate::TYPE |
+        msgs::NodeAnnouncement::TYPE => true,
+        _ => false
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
@@ -1758,4 +1781,37 @@ mod tests {
         assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
         assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
     }
+
+    #[test]
+    fn test_handshake_timeout() {
+        // Tests that we time out a peer still waiting on handshake completion after a full timer
+        // tick.
+        let cfgs = create_peermgr_cfgs(2);
+        cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release);
+        cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release);
+        let peers = create_network(2, &cfgs);
+
+        let secp_ctx = Secp256k1::new();
+        let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret);
+        let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+        let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+        let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone()).unwrap();
+        peers[0].new_inbound_connection(fd_a.clone()).unwrap();
+
+        // If we get a single timer tick before completion, that's fine
+        assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+        peers[0].timer_tick_occurred();
+        assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+
+        assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false);
+        peers[0].process_events();
+        assert_eq!(peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+        peers[1].process_events();
+
+        // ...but if we get a second timer tick, we should disconnect the peer
+        peers[0].timer_tick_occurred();
+        assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+
+        assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
+    }
 }
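
The new `test_handshake_timeout` test drives the timeout by calling `timer_tick_occurred()` by hand. Outside of tests, the handshake timeout (like the existing ping/pong timeout) only fires if the embedding application invokes that method on a timer. A hedged usage sketch; the closure is assumed to capture an application-owned `Arc<PeerManager<...>>`, and the ten-second cadence is only indicative, the method's documentation is authoritative:

use std::thread;
use std::time::Duration;

// Fire the supplied tick callback periodically from a background thread. In practice the
// closure would capture an Arc<PeerManager<...>> and call `timer_tick_occurred()` on it.
fn spawn_peer_timer<F: Fn() + Send + 'static>(tick: F) -> thread::JoinHandle<()> {
    thread::spawn(move || loop {
        thread::sleep(Duration::from_secs(10));
        tick();
    })
}

// Usage (illustrative):
// spawn_peer_timer(move || peer_manager.timer_tick_occurred());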