X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fpeer_handler.rs;h=9ce230861456927f53bf4a2b65dd33e800e17e5d;hb=8c334cb579a7fc2688744f0d22279fe37ae81eff;hp=6ffffec4dd0bd94febc085ed998051c7090da5eb;hpb=4b70921c958181e43dc056dc05ef59427d13f2af;p=rust-lightning

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 6ffffec4..9ce23086 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -24,25 +24,22 @@ use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
-#[cfg(not(c_bindings))]
-use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use crate::util::ser::{VecWriter, Writeable, Writer};
 use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
 use crate::ln::wire;
 use crate::ln::wire::{Encode, Type};
-#[cfg(not(c_bindings))]
-use crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
 use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage};
 use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
 use crate::onion_message::packet::OnionMessageContents;
 use crate::routing::gossip::{NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
-use crate::util::logger::{Logger, WithContext};
+use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::string::PrintableString;
+#[allow(unused_imports)]
 use crate::prelude::*;
+
 use crate::io;
-use alloc::collections::VecDeque;
 use crate::sync::{Mutex, MutexGuard, FairRwLock};
 use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
 use core::{cmp, hash, fmt, mem};
@@ -52,6 +49,8 @@ use core::convert::Infallible;
 use std::error;
 #[cfg(not(c_bindings))]
 use {
+	crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
+	crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger},
 	crate::routing::gossip::{NetworkGraph, P2PGossipSync},
 	crate::sign::KeysManager,
 	crate::sync::Arc,
@@ -118,7 +117,9 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
 	fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
 	fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
 	fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
-		InitFeatures::empty()
+		let mut features = InitFeatures::empty();
+		features.set_gossip_queries_optional();
+		features
 	}
 	fn processing_queue_high(&self) -> bool { false }
 }
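The `IgnoringMessageHandler` hunk above now advertises `gossip_queries` as an optional feature instead of an empty feature set. A standalone sketch of the BOLT 9 even/odd feature-bit convention behind `set_gossip_queries_optional()` follows; it models the idea only and is not LDK's `InitFeatures` implementation (bit numbers follow BOLT 9, where `gossip_queries` is the 6/7 pair):

const GOSSIP_QUERIES_REQUIRED: u64 = 1 << 6; // even bit: peers must understand it
const GOSSIP_QUERIES_OPTIONAL: u64 = 1 << 7; // odd bit: peers may ignore it

#[derive(Clone, Copy, Default)]
struct InitFeatures(u64);

impl InitFeatures {
    fn set_gossip_queries_optional(&mut self) { self.0 |= GOSSIP_QUERIES_OPTIONAL; }
    fn supports_gossip_queries(&self) -> bool {
        self.0 & (GOSSIP_QUERIES_REQUIRED | GOSSIP_QUERIES_OPTIONAL) != 0
    }
    fn requires_unknown_bits(&self, known: u64) -> bool {
        // Only unknown *even* (required) bits force a disconnect.
        const EVEN_BITS: u64 = 0x5555_5555_5555_5555;
        (self.0 & !known) & EVEN_BITS != 0
    }
}

fn main() {
    let mut features = InitFeatures::default();
    features.set_gossip_queries_optional();
    assert!(features.supports_gossip_queries());
    // A peer that knows nothing about gossip queries may still connect,
    // because only the optional bit is set.
    assert!(!features.requires_unknown_bits(0));
}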
@@ -224,10 +225,10 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	// Any messages which are related to a specific channel generate an error message to let the
 	// peer know we don't care about channels.
 	fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) {
-		ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+		ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
 	}
 	fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
-		ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+		ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
 	}
 	fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) {
 		ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
@@ -247,12 +248,15 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	fn handle_stfu(&self, their_node_id: &PublicKey, msg: &msgs::Stfu) {
 		ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
 	}
+	#[cfg(splicing)]
 	fn handle_splice(&self, their_node_id: &PublicKey, msg: &msgs::Splice) {
 		ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
 	}
+	#[cfg(splicing)]
 	fn handle_splice_ack(&self, their_node_id: &PublicKey, msg: &msgs::SpliceAck) {
 		ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
 	}
+	#[cfg(splicing)]
 	fn handle_splice_locked(&self, their_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
 		ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
 	}
@@ -305,6 +309,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 		features.set_channel_type_optional();
 		features.set_scid_privacy_optional();
 		features.set_zero_conf_optional();
+		features.set_route_blinding_optional();
 		features
 	}
@@ -316,11 +321,11 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	}
 	fn handle_open_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
-		ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+		ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
 	}
 	fn handle_accept_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
-		ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+		ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
 	}
 	fn handle_tx_add_input(&self, their_node_id: &PublicKey, msg: &msgs::TxAddInput) {
@@ -432,6 +437,26 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 	fn disconnect_socket(&mut self);
 }
 
+/// Details of a connected peer as returned by [`PeerManager::list_peers`].
+pub struct PeerDetails {
+	/// The node id of the peer.
+	///
+	/// For outbound connections, this [`PublicKey`] will be the same as the `their_node_id` parameter
+	/// passed in to [`PeerManager::new_outbound_connection`].
+	pub counterparty_node_id: PublicKey,
+	/// The socket address the peer provided in the initial handshake.
+	///
+	/// Will only be `Some` if an address had been previously provided to
+	/// [`PeerManager::new_outbound_connection`] or [`PeerManager::new_inbound_connection`].
+	pub socket_address: Option<SocketAddress>,
+	/// The features the peer provided in the initial handshake.
+	pub init_features: InitFeatures,
+	/// Indicates the direction of the peer connection.
+	///
+	/// Will be `true` for inbound connections, and `false` for outbound connections.
+	pub is_inbound_connection: bool,
+}
+
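The new `PeerDetails` struct bundles what `get_peer_node_ids` used to return as a bare `(PublicKey, Option<SocketAddress>)` tuple, and adds the negotiated features and connection direction. A standalone sketch of how calling code might consume the new shape, with stand-in types for LDK's `PublicKey`, `SocketAddress`, and `InitFeatures` (illustrative only):

/// Standalone mirror of the `PeerDetails` shape, for illustration only.
struct PeerDetails {
    counterparty_node_id: [u8; 33], // stand-in for `PublicKey`
    socket_address: Option<String>, // stand-in for `SocketAddress`
    init_features: u64,             // stand-in for `InitFeatures`
    is_inbound_connection: bool,
}

fn summarize(peers: &[PeerDetails]) -> (usize, usize) {
    let inbound = peers.iter().filter(|p| p.is_inbound_connection).count();
    (inbound, peers.len() - inbound)
}

fn main() {
    let peers = vec![PeerDetails {
        counterparty_node_id: [2; 33],
        socket_address: Some("127.0.0.1:9735".to_owned()),
        init_features: 0,
        is_inbound_connection: false,
    }];
    let (inbound, outbound) = summarize(&peers);
    println!("{inbound} inbound / {outbound} outbound");
}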
 /// Error for PeerManager errors. If you get one of these, you must disconnect the socket and
 /// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
 /// descriptor.
@@ -945,8 +970,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
-	pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<SocketAddress>)> {
+	/// Returns a list of [`PeerDetails`] for connected peers that have completed the initial
+	/// handshake.
+	pub fn list_peers(&self) -> Vec<PeerDetails> {
 		let peers = self.peers.read().unwrap();
 		peers.values().filter_map(|peer_mutex| {
 			let p = peer_mutex.lock().unwrap();
 			if !p.handshake_complete() {
 				return None;
 			}
-			Some((p.their_node_id.unwrap().0, p.their_socket_address.clone()))
+			let details = PeerDetails {
+				// unwrap safety: their_node_id is guaranteed to be `Some` after the handshake
+				// completed.
+				counterparty_node_id: p.their_node_id.unwrap().0,
+				socket_address: p.their_socket_address.clone(),
+				// unwrap safety: their_features is guaranteed to be `Some` after the handshake
+				// completed.
+				init_features: p.their_features.clone().unwrap(),
+				is_inbound_connection: p.inbound_connection,
+			};
+			Some(details)
 		}).collect()
 	}
 
+	/// Returns the [`PeerDetails`] of a connected peer that has completed the initial handshake.
+	///
+	/// Will return `None` if the peer is unknown or it hasn't completed the initial handshake.
+	pub fn peer_by_node_id(&self, their_node_id: &PublicKey) -> Option<PeerDetails> {
+		let peers = self.peers.read().unwrap();
+		peers.values().find_map(|peer_mutex| {
+			let p = peer_mutex.lock().unwrap();
+			if !p.handshake_complete() {
+				return None;
+			}
+
+			// unwrap safety: their_node_id is guaranteed to be `Some` after the handshake
+			// completed.
+			let counterparty_node_id = p.their_node_id.unwrap().0;
+
+			if counterparty_node_id != *their_node_id {
+				return None;
+			}
+
+			let details = PeerDetails {
+				counterparty_node_id,
+				socket_address: p.their_socket_address.clone(),
+				// unwrap safety: their_features is guaranteed to be `Some` after the handshake
+				// completed.
+				init_features: p.their_features.clone().unwrap(),
+				is_inbound_connection: p.inbound_connection,
+			};
+			Some(details)
+		})
+	}
+
 	fn get_ephemeral_key(&self) -> SecretKey {
 		let mut ephemeral_hash = self.ephemeral_key_midstate.clone();
 		let counter = self.peer_counter.get_increment();
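Both accessors above follow the same locking discipline: take the read lock on the peer map, briefly lock each peer, skip peers that are still mid-handshake, and clone details out so no lock outlives the call (`peer_by_node_id` adds a node-id filter on top of the same loop). A minimal standalone sketch of that pattern with stand-in types:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct Peer { node_id: Option<[u8; 33]>, handshake_complete: bool }

struct PeerSet { peers: RwLock<HashMap<u64, Mutex<Peer>>> }

impl PeerSet {
    fn peer_node_id(&self, needle: &[u8; 33]) -> Option<[u8; 33]> {
        let peers = self.peers.read().unwrap();
        peers.values().find_map(|m| {
            let p = m.lock().unwrap();
            if !p.handshake_complete { return None; }
            // After the handshake, node_id is always Some (mirrors the
            // unwrap-safety comments in the real code).
            let id = p.node_id.expect("set during handshake");
            if id == *needle { Some(id) } else { None }
        })
    }
}

fn main() {
    let set = PeerSet { peers: RwLock::new(HashMap::new()) };
    set.peers.write().unwrap().insert(
        1, Mutex::new(Peer { node_id: Some([2; 33]), handshake_complete: true }));
    assert!(set.peer_node_id(&[2; 33]).is_some());
    assert!(set.peer_node_id(&[3; 33]).is_none());
}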
@@ -1271,7 +1329,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 	fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
-		let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+		let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
 		if is_gossip_msg(message.type_id()) {
 			log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
 		} else {
@@ -1330,7 +1388,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 				msgs::ErrorAction::IgnoreAndLog(level) => {
-					log_given_level!(logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+					log_given_level!(logger, level, "Error handling {}message{}; ignoring: {}",
+						if level == Level::Gossip { "gossip " } else { "" },
+						OptionalFromDebugger(&peer_node_id), e.err);
 					continue
 				},
 				msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
@@ -1376,7 +1436,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 			() => {
-				let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+				let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
 				match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
 					hash_map::Entry::Occupied(e) => {
 						log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1418,7 +1478,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 					NextNoiseStep::ActThree => {
 						let their_node_id = try_potential_handleerror!(peer,
@@ -1431,7 +1490,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 					NextNoiseStep::NoiseComplete => {
 						if peer.pending_read_is_header {
@@ -1456,7 +1514,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 								Ok(x) => x,
 								Err(e) => {
@@ -1497,6 +1555,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 									(msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
 									(msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
+									(msgs::DecodeError::DangerousValue, _) => return Err(PeerHandleError { }),
 								}
 							}
 						};
@@ -1534,15 +1593,37 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 	fn do_handle_message(
 		&self,
 		peer_mutex: &Mutex<Peer>,
-		mut peer_lock: MutexGuard<Peer>,
-		message: wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>
-	) -> Result<Option<wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
+		peer_lock: MutexGuard<Peer>,
+		message: wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>
+	) -> Result<Option<wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
 		let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
 		let logger = WithContext::from(&self.logger, Some(their_node_id), None);
+
+		let message = match self.do_handle_message_holding_peer_lock(peer_lock, message, &their_node_id, &logger)? {
+			Some(processed_message) => processed_message,
+			None => return Ok(None),
+		};
+
+		self.do_handle_message_without_peer_lock(peer_mutex, message, &their_node_id, &logger)
+	}
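The refactor above splits message handling in two: per-peer bookkeeping runs while the peer lock is held, and handler dispatch, which may call back into the `PeerManager`, runs only after the guard is dropped. A standalone sketch of the shape of that split, with stand-in types rather than the real message enum:

use std::sync::{Mutex, MutexGuard};

struct Peer { received_message_since_timer_tick: bool }

enum Message { Ping, Other(String) }

fn handle_holding_lock(mut peer: MutexGuard<Peer>, msg: Message) -> Option<Message> {
    peer.received_message_since_timer_tick = true;
    match msg {
        // Fully handled under the lock: nothing left to do.
        Message::Ping => None,
        // Everything else is passed on for lock-free processing.
        other => Some(other),
    }
    // The guard is dropped when this function returns.
}

fn handle_without_lock(msg: Message) {
    if let Message::Other(m) = msg { println!("dispatching {m}"); }
}

fn main() {
    let peer = Mutex::new(Peer { received_message_since_timer_tick: false });
    if let Some(msg) = handle_holding_lock(peer.lock().unwrap(), Message::Other("init".into())) {
        handle_without_lock(msg);
    }
}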
+
+	// Conducts all message processing that requires us to hold the `peer_lock`.
+	//
+	// Returns `None` if the message was fully processed and otherwise returns the message back to
+	// allow it to be subsequently processed by `do_handle_message_without_peer_lock`.
+	fn do_handle_message_holding_peer_lock<'a>(
+		&self,
+		mut peer_lock: MutexGuard<Peer>,
+		message: wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>,
+		their_node_id: &PublicKey,
+		logger: &WithContext<'a, L>
+	) -> Result<Option<wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError>
+	{
 		peer_lock.received_message_since_timer_tick = true;
 
 		// Need an Init as first message
@@ -1601,6 +1682,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
+	fn do_handle_message_without_peer_lock<'a>(
+		&self,
+		peer_mutex: &Mutex<Peer>,
+		message: wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>,
+		their_node_id: &PublicKey,
+		logger: &WithContext<'a, L>
+	) -> Result<Option<wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError>
+	{
 		if is_gossip_msg(message.type_id()) {
 			log_gossip!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
 		} else {
@@ -1693,13 +1787,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
+			#[cfg(splicing)]
 			wire::Message::Splice(msg) => {
 				self.message_handler.chan_handler.handle_splice(&their_node_id, &msg);
 			}
+			#[cfg(splicing)]
 			wire::Message::SpliceAck(msg) => {
 				self.message_handler.chan_handler.handle_splice_ack(&their_node_id, &msg);
 			}
+			#[cfg(splicing)]
 			wire::Message::SpliceLocked(msg) => {
 				self.message_handler.chan_handler.handle_splice_locked(&their_node_id, &msg);
 			}
@@ -1826,7 +1923,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
-	fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
+	fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
 		match msg {
 			wire::Message::ChannelAnnouncement(ref msg) => {
 				log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
@@ -1834,13 +1931,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 			MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
-				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
 					log_pubkey!(node_id),
-					&msg.temporary_channel_id);
+					&msg.common_fields.temporary_channel_id);
 				self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 			},
 			MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
-				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
 					log_pubkey!(node_id),
-					&msg.temporary_channel_id);
+					&msg.common_fields.temporary_channel_id);
 				self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 			},
 			MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
-				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
 					log_pubkey!(node_id),
-					&msg.temporary_channel_id);
+					&msg.common_fields.temporary_channel_id);
 				self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 			},
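The splice handlers above are compiled only when the `splicing` cfg flag is set. A sketch of the same gating pattern follows; enabling the flag via `RUSTFLAGS` is an assumption about the build setup, not something this diff shows:

// Build with: RUSTFLAGS="--cfg=splicing" cargo run

trait ChannelHandler {
    fn handle_open_channel(&self, msg: &str);
    #[cfg(splicing)]
    fn handle_splice(&self, msg: &str);
}

struct Noop;

impl ChannelHandler for Noop {
    fn handle_open_channel(&self, msg: &str) { println!("open_channel: {msg}"); }
    #[cfg(splicing)]
    fn handle_splice(&self, msg: &str) { println!("splice: {msg}"); }
}

fn main() {
    let h = Noop;
    h.handle_open_channel("demo");
    // This statement only exists when the cfg flag is set.
    #[cfg(splicing)]
    h.handle_splice("demo");
}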
 			MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
-				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
 					log_pubkey!(node_id),
-					&msg.temporary_channel_id);
+					&msg.common_fields.temporary_channel_id);
 				self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 			},
 			MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
 				log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
 					log_pubkey!(node_id),
 					&msg.temporary_channel_id,
-					log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+					ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index));
 				// TODO: If the peer is gone we should generate a DiscardFunding event
 				// indicating to the wallet that they should just throw away this funding transaction
 				self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
@@ -2216,16 +2313,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
-							let msg = msg.map(|msg| wire::Message::<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
+							let msg = msg.map(|msg| wire::Message::<<<CM as Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
 							peers_to_disconnect.insert(node_id, msg);
 						},
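`ChannelId::v1_from_funding_txid` replaces the old logging macro here. Per BOLT 2, a v1 channel id is the funding txid with its last two bytes XORed with the big-endian funding output index; a standalone sketch of that derivation:

fn v1_channel_id(funding_txid: [u8; 32], funding_output_index: u16) -> [u8; 32] {
    let mut id = funding_txid;
    // XOR the big-endian output index into the final two bytes.
    id[30] ^= (funding_output_index >> 8) as u8;
    id[31] ^= (funding_output_index & 0xff) as u8;
    id
}

fn main() {
    let txid = [0xab; 32];
    let id = v1_channel_id(txid, 1);
    assert_eq!(id[31], 0xab ^ 0x01);
    assert_eq!(&id[..30], &txid[..30]);
    println!("{}", id.iter().map(|b| format!("{b:02x}")).collect::<String>());
}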
 						msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
 							log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
 								log_pubkey!(node_id), msg.data);
 							// We do not have the peers write lock, so we just store that we're
-							// about to disconenct the peer and do it after we finish
+							// about to disconnect the peer and do it after we finish
 							// processing most messages.
 							peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
 						},
@@ -2582,7 +2679,7 @@ mod tests {
 	use crate::ln::ChannelId;
 	use crate::ln::features::{InitFeatures, NodeFeatures};
 	use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
-	use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
+	use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses, ErroringMessageHandler, MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER};
 	use crate::ln::{msgs, wire};
 	use crate::ln::msgs::{LightningError, SocketAddress};
 	use crate::util::test_utils;
@@ -2591,11 +2688,13 @@ mod tests {
 	use bitcoin::blockdata::constants::ChainHash;
 	use bitcoin::secp256k1::{PublicKey, SecretKey};
 
-	use crate::prelude::*;
 	use crate::sync::{Arc, Mutex};
 	use core::convert::Infallible;
 	use core::sync::atomic::{AtomicBool, Ordering};
 
+	#[allow(unused_imports)]
+	use crate::prelude::*;
+
 	#[derive(Clone)]
 	struct FileDescriptor {
 		fd: u16,
@@ -2745,6 +2844,8 @@ mod tests {
 		};
 		let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
 		let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
+		let features_a = peer_a.init_features(&id_b);
+		let features_b = peer_b.init_features(&id_a);
 		let mut fd_b = FileDescriptor {
 			fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
 			disconnect: Arc::new(AtomicBool::new(false)),
@@ -2766,9 +2867,12 @@ mod tests {
 		let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
 		assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
 
-		assert!(peer_a.get_peer_node_ids().contains(&(id_b, Some(addr_b))));
-		assert!(peer_b.get_peer_node_ids().contains(&(id_a, Some(addr_a))));
-
+		assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().counterparty_node_id, id_b);
+		assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().socket_address, Some(addr_b));
+		assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().init_features, features_b);
+		assert_eq!(peer_b.peer_by_node_id(&id_a).unwrap().counterparty_node_id, id_a);
+		assert_eq!(peer_b.peer_by_node_id(&id_a).unwrap().socket_address, Some(addr_a));
+		assert_eq!(peer_b.peer_by_node_id(&id_a).unwrap().init_features, features_a);
 
 		(fd_a.clone(), fd_b.clone())
 	}
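The tests drive the `PeerManager` through an in-memory `FileDescriptor` double rather than a real socket: writes are captured in a shared buffer, and equality/hashing key on the fd so clones compare equal. A standalone sketch of that double; the `send_data`/`disconnect_socket` shape mirrors the `SocketDescriptor` trait above, but this version implements no LDK trait and is illustrative only:

use std::hash::{Hash, Hasher};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};

#[derive(Clone)]
struct FileDescriptor {
    fd: u16,
    outbound_data: Arc<Mutex<Vec<u8>>>,
    disconnect: Arc<AtomicBool>,
}

impl PartialEq for FileDescriptor { fn eq(&self, o: &Self) -> bool { self.fd == o.fd } }
impl Eq for FileDescriptor {}
impl Hash for FileDescriptor {
    fn hash<H: Hasher>(&self, h: &mut H) { self.fd.hash(h) }
}

impl FileDescriptor {
    fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
        self.outbound_data.lock().unwrap().extend_from_slice(data);
        data.len() // pretend the OS accepted every byte
    }
    fn disconnect_socket(&mut self) { self.disconnect.store(true, Ordering::SeqCst); }
}

fn main() {
    let mut fd = FileDescriptor {
        fd: 1,
        outbound_data: Arc::new(Mutex::new(Vec::new())),
        disconnect: Arc::new(AtomicBool::new(false)),
    };
    assert_eq!(fd.send_data(b"act one", true), 7);
    assert_eq!(fd.outbound_data.lock().unwrap().len(), 7);
    fd.disconnect_socket();
    assert!(fd.disconnect.load(Ordering::SeqCst));
}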
@@ -3117,6 +3221,105 @@ mod tests {
 		assert!(peers[0].read_event(&mut fd_a, &b_data).is_err());
 	}
 
+	#[test]
+	fn test_inbound_conn_handshake_complete_awaiting_pong() {
+		// Test that we do not disconnect an outbound peer after the noise handshake completes due
+		// to a pong timeout for a ping that was never sent if a timer tick fires after we send act
+		// two of the noise handshake along with our init message but before we receive their init
+		// message.
+		let logger = test_utils::TestLogger::new();
+		let node_signer_a = test_utils::TestNodeSigner::new(SecretKey::from_slice(&[42; 32]).unwrap());
+		let node_signer_b = test_utils::TestNodeSigner::new(SecretKey::from_slice(&[43; 32]).unwrap());
+		let peer_a = PeerManager::new(MessageHandler {
+			chan_handler: ErroringMessageHandler::new(),
+			route_handler: IgnoringMessageHandler {},
+			onion_message_handler: IgnoringMessageHandler {},
+			custom_message_handler: IgnoringMessageHandler {},
+		}, 0, &[0; 32], &logger, &node_signer_a);
+		let peer_b = PeerManager::new(MessageHandler {
+			chan_handler: ErroringMessageHandler::new(),
+			route_handler: IgnoringMessageHandler {},
+			onion_message_handler: IgnoringMessageHandler {},
+			custom_message_handler: IgnoringMessageHandler {},
+		}, 0, &[1; 32], &logger, &node_signer_b);
+
+		let a_id = node_signer_a.get_node_id(Recipient::Node).unwrap();
+		let mut fd_a = FileDescriptor {
+			fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+			disconnect: Arc::new(AtomicBool::new(false)),
+		};
+		let mut fd_b = FileDescriptor {
+			fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+			disconnect: Arc::new(AtomicBool::new(false)),
+		};
+
+		// Exchange messages with both peers until they both complete the init handshake.
+		let act_one = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+		peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
+
+		assert_eq!(peer_a.read_event(&mut fd_a, &act_one).unwrap(), false);
+		peer_a.process_events();
+
+		let act_two = fd_a.outbound_data.lock().unwrap().split_off(0);
+		assert_eq!(peer_b.read_event(&mut fd_b, &act_two).unwrap(), false);
+		peer_b.process_events();
+
+		// Calling this here triggers the race on inbound connections.
+		peer_b.timer_tick_occurred();
+
+		let act_three_with_init_b = fd_b.outbound_data.lock().unwrap().split_off(0);
+		assert!(!peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete());
+		assert_eq!(peer_a.read_event(&mut fd_a, &act_three_with_init_b).unwrap(), false);
+		peer_a.process_events();
+		assert!(peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete());
+
+		let init_a = fd_a.outbound_data.lock().unwrap().split_off(0);
+		assert!(!init_a.is_empty());
+
+		assert!(!peer_b.peers.read().unwrap().get(&fd_b).unwrap().lock().unwrap().handshake_complete());
+		assert_eq!(peer_b.read_event(&mut fd_b, &init_a).unwrap(), false);
+		peer_b.process_events();
+		assert!(peer_b.peers.read().unwrap().get(&fd_b).unwrap().lock().unwrap().handshake_complete());
+
+		// Make sure we're still connected.
+		assert_eq!(peer_b.peers.read().unwrap().len(), 1);
+
+		// B should send a ping on the first timer tick after `handshake_complete`.
+		assert!(fd_b.outbound_data.lock().unwrap().split_off(0).is_empty());
+		peer_b.timer_tick_occurred();
+		peer_b.process_events();
+		assert!(!fd_b.outbound_data.lock().unwrap().split_off(0).is_empty());
+
+		let mut send_warning = || {
+			{
+				let peers = peer_a.peers.read().unwrap();
+				let mut peer_b = peers.get(&fd_a).unwrap().lock().unwrap();
+				peer_a.enqueue_message(&mut peer_b, &msgs::WarningMessage {
+					channel_id: ChannelId([0; 32]),
+					data: "no disconnect plz".to_string(),
+				});
+			}
+			peer_a.process_events();
+			let msg = fd_a.outbound_data.lock().unwrap().split_off(0);
+			assert!(!msg.is_empty());
+			assert_eq!(peer_b.read_event(&mut fd_b, &msg).unwrap(), false);
+			peer_b.process_events();
+		};
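The test fires `timer_tick_occurred` by hand to provoke the ping/pong race. In production some timer must call it at a fixed interval (LDK's docs suggest roughly every five to ten seconds); a standalone sketch of such a driver, with an artificially short interval so the demo exits quickly:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn spawn_ticker<F: Fn() + Send + 'static>(tick: F, stop: Arc<AtomicBool>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        while !stop.load(Ordering::Acquire) {
            tick(); // e.g. peer_manager.timer_tick_occurred() in real usage
            thread::sleep(Duration::from_millis(100)); // seconds in production
        }
    })
}

fn main() {
    let stop = Arc::new(AtomicBool::new(false));
    let handle = spawn_ticker(|| println!("tick: ping peers, drop unresponsive ones"), stop.clone());
    thread::sleep(Duration::from_millis(50));
    stop.store(true, Ordering::Release);
    // The ticker wakes from its sleep, sees the stop flag, and exits.
    handle.join().unwrap();
}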
+
+		// Fire more ticks until we reach the pong timeout. We send any message except pong to
+		// pretend the connection is still alive.
+		send_warning();
+		for _ in 0..MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER {
+			peer_b.timer_tick_occurred();
+			send_warning();
+		}
+		assert_eq!(peer_b.peers.read().unwrap().len(), 1);
+
+		// One more tick should enforce the pong timeout.
+		peer_b.timer_tick_occurred();
+		assert_eq!(peer_b.peers.read().unwrap().len(), 0);
+	}
+
 	#[test]
 	fn test_filter_addresses(){
 		// Tests the filter_addresses function.
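The truncated `test_filter_addresses` test exercises `filter_addresses`, which drops addresses that are not publicly routable before they are used in gossip. A simplified, IPv4-only standalone sketch of that kind of filter (the real function works on `SocketAddress` and covers more ranges):

use std::net::Ipv4Addr;

fn filter_ipv4(addr: Ipv4Addr) -> Option<Ipv4Addr> {
    let o = addr.octets();
    let private = o[0] == 10                                // 10.0.0.0/8
        || (o[0] == 172 && o[1] >= 16 && o[1] < 32)         // 172.16.0.0/12
        || (o[0] == 192 && o[1] == 168)                     // 192.168.0.0/16
        || (o[0] == 100 && o[1] >= 64 && o[1] < 128)        // 100.64.0.0/10 (CGNAT)
        || o[0] == 127                                      // loopback
        || o[0] == 0                                        // "this network"
        || (o[0] == 169 && o[1] == 254);                    // link-local
    if private { None } else { Some(addr) }
}

fn main() {
    assert!(filter_ipv4(Ipv4Addr::new(192, 168, 1, 2)).is_none());
    assert!(filter_ipv4(Ipv4Addr::new(127, 0, 0, 1)).is_none());
    assert!(filter_ipv4(Ipv4Addr::new(8, 8, 8, 8)).is_some());
}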