diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 5e1aa40b..3fbe6aab 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -23,7 +23,7 @@
 //! # Example
 //! ```
 //! use std::net::TcpStream;
-//! use bitcoin::secp256k1::key::PublicKey;
+//! use bitcoin::secp256k1::PublicKey;
 //! use lightning::util::events::{Event, EventHandler, EventsProvider};
 //! use std::net::SocketAddr;
 //! use std::sync::Arc;
@@ -66,12 +66,14 @@
 //! }
 //! ```

+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
 #![deny(missing_docs)]

 #![cfg_attr(docsrs, feature(doc_auto_cfg))]

-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;

 use tokio::net::TcpStream;
 use tokio::{io, time};
@@ -81,9 +83,10 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
+use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
 use lightning::util::logger::Logger;

+use std::ops::Deref;
 use std::task;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
@@ -120,11 +123,42 @@ struct Connection {
 	id: u64,
 }
 impl Connection {
-	async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
-			CMH: ChannelMessageHandler + 'static,
-			RMH: RoutingMessageHandler + 'static,
-			L: Logger + 'static + ?Sized,
-			UMH: CustomMessageHandler + 'static {
+	async fn poll_event_process<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
+			CMH: Deref + 'static + Send + Sync,
+			RMH: Deref + 'static + Send + Sync,
+			OMH: Deref + 'static + Send + Sync,
+			L: Deref + 'static + Send + Sync,
+			UMH: Deref + 'static + Send + Sync,
+			CMH::Target: ChannelMessageHandler + Send + Sync,
+			RMH::Target: RoutingMessageHandler + Send + Sync,
+			OMH::Target: OnionMessageHandler + Send + Sync,
+			L::Target: Logger + Send + Sync,
+			UMH::Target: CustomMessageHandler + Send + Sync,
+	{
+		loop {
+			if event_receiver.recv().await.is_none() {
+				return;
+			}
+			peer_manager.process_events();
+		}
+	}
+
+	async fn schedule_read<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+			CMH: Deref + 'static + Send + Sync,
+			RMH: Deref + 'static + Send + Sync,
+			OMH: Deref + 'static + Send + Sync,
+			L: Deref + 'static + Send + Sync,
+			UMH: Deref + 'static + Send + Sync,
+			CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
+			RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
+			OMH::Target: OnionMessageHandler + 'static + Send + Sync,
+			L::Target: Logger + 'static + Send + Sync,
+			UMH::Target: CustomMessageHandler + 'static + Send + Sync,
+	{
+		// Create a waker to wake up poll_event_process, above
+		let (event_waker, event_receiver) = mpsc::channel(1);
+		tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+
 		// 8KB is nice and big but also should never cause any issues with stack overflowing.
 		let mut buf = [0; 8192];
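A note on the machinery this hunk introduces: each connection now spawns a dedicated `poll_event_process` task and pokes it through a capacity-1 channel rather than calling `process_events()` inline from the read loop (the inline call is removed in the next hunk). The capacity of 1 means any number of `try_send` wakeups issued while a processing run is in flight collapse into a single pending token. A minimal, self-contained sketch of that coalescing-waker pattern (the names here are illustrative, not from the patch):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Capacity-1 channel: redundant wakeups are coalesced, because a second
    // try_send() simply fails while one token is already queued.
    let (waker, mut receiver) = mpsc::channel::<()>(1);

    let processor = tokio::spawn(async move {
        // Mirrors poll_event_process: run until every sender is dropped.
        while receiver.recv().await.is_some() {
            println!("processing events");
        }
    });

    for _ in 0..100 {
        // Mirrors the hot loop: try_send never blocks the caller, and at most
        // one wakeup is ever queued no matter how many messages arrived.
        let _ = waker.try_send(());
    }

    drop(waker); // closing the channel lets the processor task exit
    processor.await.unwrap();
}
```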
@@ -175,7 +209,14 @@ impl Connection {
 				Err(_) => break Disconnect::PeerDisconnected,
 			},
 		}
-			peer_manager.process_events();
+			let _ = event_waker.try_send(());
+
+			// At this point we've processed a message or two, and reset the ping timer for this
+			// peer, at least in the "are we still receiving messages" context. If we don't give up
+			// our timeslice to another task we may just spin on this peer, starving other peers
+			// and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
+			// here.
+			tokio::task::yield_now().await;
 		};
 		let writer_option = us.lock().unwrap().writer.take();
 		if let Some(mut writer) = writer_option {
@@ -231,11 +272,18 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
-		CMH: ChannelMessageHandler + 'static + Send + Sync,
-		RMH: RoutingMessageHandler + 'static + Send + Sync,
-		L: Logger + 'static + ?Sized + Send + Sync,
-		UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+		CMH: Deref + 'static + Send + Sync,
+		RMH: Deref + 'static + Send + Sync,
+		OMH: Deref + 'static + Send + Sync,
+		L: Deref + 'static + Send + Sync,
+		UMH: Deref + 'static + Send + Sync,
+		CMH::Target: ChannelMessageHandler + Send + Sync,
+		RMH::Target: RoutingMessageHandler + Send + Sync,
+		OMH::Target: OnionMessageHandler + Send + Sync,
+		L::Target: Logger + Send + Sync,
+		UMH::Target: CustomMessageHandler + Send + Sync,
+{
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(debug_assertions)]
@@ -273,11 +321,18 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
-		CMH: ChannelMessageHandler + 'static + Send + Sync,
-		RMH: RoutingMessageHandler + 'static + Send + Sync,
-		L: Logger + 'static + ?Sized + Send + Sync,
-		UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+		CMH: Deref + 'static + Send + Sync,
+		RMH: Deref + 'static + Send + Sync,
+		OMH: Deref + 'static + Send + Sync,
+		L: Deref + 'static + Send + Sync,
+		UMH: Deref + 'static + Send + Sync,
+		CMH::Target: ChannelMessageHandler + Send + Sync,
+		RMH::Target: RoutingMessageHandler + Send + Sync,
+		OMH::Target: OnionMessageHandler + Send + Sync,
+		L::Target: Logger + Send + Sync,
+		UMH::Target: CustomMessageHandler + Send + Sync,
+{
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(debug_assertions)]
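The signature rewrites in the hunks above and below all follow the same pattern: instead of hard-coding `Arc<CMH>` with `CMH: ChannelMessageHandler`, the functions now accept any `CMH: Deref` whose `Target` implements the trait, so callers can pass an `Arc`, a `Box`, or a plain reference. A toy sketch of why that bound style is more flexible (the trait and type names are made up for illustration):

```rust
use std::ops::Deref;
use std::sync::Arc;

trait Handler { fn handle(&self); }

struct MyHandler;
impl Handler for MyHandler {
    fn handle(&self) { println!("handled"); }
}

// Mirrors the patch's bound style: generic over any pointer-like type whose
// Target implements the handler trait, rather than hard-coding Arc<H>.
fn process<H: Deref>(handler: H) where H::Target: Handler {
    handler.handle(); // method call auto-derefs through H to H::Target
}

fn main() {
    process(Arc::new(MyHandler)); // Arc<MyHandler> works
    process(&MyHandler);          // a plain reference works too
}
```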
@@ -344,11 +399,18 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
-		CMH: ChannelMessageHandler + 'static + Send + Sync,
-		RMH: RoutingMessageHandler + 'static + Send + Sync,
-		L: Logger + 'static + ?Sized + Send + Sync,
-		UMH: CustomMessageHandler + 'static + Send + Sync {
+pub async fn connect_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+		CMH: Deref + 'static + Send + Sync,
+		RMH: Deref + 'static + Send + Sync,
+		OMH: Deref + 'static + Send + Sync,
+		L: Deref + 'static + Send + Sync,
+		UMH: Deref + 'static + Send + Sync,
+		CMH::Target: ChannelMessageHandler + Send + Sync,
+		RMH::Target: RoutingMessageHandler + Send + Sync,
+		OMH::Target: OnionMessageHandler + Send + Sync,
+		L::Target: Logger + Send + Sync,
+		UMH::Target: CustomMessageHandler + Send + Sync,
+{
 	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
 		Some(setup_outbound(peer_manager, their_node_id, stream))
 	} else { None }
@@ -443,6 +505,9 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
 					// pause read given we're now waiting on the remote end to ACK (and in
 					// accordance with the send_data() docs).
 					us.read_paused = true;
+					// Further, to avoid any current pending read causing a `read_event` call, wake
+					// up the read_waker and restart its loop.
+					let _ = us.read_waker.try_send(());
 					return written_len;
 				},
 			}
@@ -481,6 +546,7 @@ mod tests {
 	use lightning::ln::features::*;
 	use lightning::ln::msgs::*;
 	use lightning::ln::peer_handler::{MessageHandler, PeerManager};
+	use lightning::ln::features::NodeFeatures;
 	use lightning::util::events::*;

 	use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
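For reference, the shape of `connect_outbound`'s bounded connect above, extracted into a standalone sketch (the helper name and address are illustrative, not from the patch): a ten-second `tokio::time::timeout` turns a hung TCP handshake into a clean `None` instead of an await that never resolves.

```rust
use std::time::Duration;
use tokio::{net::TcpStream, time};

// Mirrors connect_outbound's bounded connect. The outer Err(_) is the
// timeout firing; the inner Ok(Err(_)) is a connection error. Both are
// collapsed into None, as in the patch.
async fn connect_with_timeout(addr: &str) -> Option<TcpStream> {
    match time::timeout(Duration::from_secs(10), TcpStream::connect(addr)).await {
        Ok(Ok(stream)) => Some(stream),
        _ => None,
    }
}

#[tokio::main]
async fn main() {
    if connect_with_timeout("127.0.0.1:9735").await.is_none() {
        println!("connection failed or timed out");
    }
}
```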
@@ -509,20 +575,22 @@ mod tests {
 		fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 		fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 		fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
-		fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
-		fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
-		fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
+		fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
+		fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> { None }
+		fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
 		fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
 		fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
 		fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
 		fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
+		fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+		fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
 	}
 	impl ChannelMessageHandler for MsgHandler {
 		fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &OpenChannel) {}
 		fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &AcceptChannel) {}
 		fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
 		fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
-		fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {}
+		fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {}
 		fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {}
 		fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
 		fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
@@ -540,13 +608,16 @@ mod tests {
 				self.pubkey_disconnected.clone().try_send(()).unwrap();
 			}
 		}
-		fn peer_connected(&self, their_node_id: &PublicKey, _msg: &Init) {
+		fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
 			if *their_node_id == self.expected_pubkey {
 				self.pubkey_connected.clone().try_send(()).unwrap();
 			}
+			Ok(())
 		}
 		fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
 		fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
+		fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+		fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
 	}
 	impl MessageSendEventsProvider for MsgHandler {
 		fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
@@ -556,6 +627,22 @@ mod tests {
 		}
 	}

+	fn make_tcp_connection() -> (std::net::TcpStream, std::net::TcpStream) {
+		if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
+			(std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
+		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:19735") {
+			(std::net::TcpStream::connect("127.0.0.1:19735").unwrap(), listener.accept().unwrap().0)
+		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9997") {
+			(std::net::TcpStream::connect("127.0.0.1:9997").unwrap(), listener.accept().unwrap().0)
+		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9998") {
+			(std::net::TcpStream::connect("127.0.0.1:9998").unwrap(), listener.accept().unwrap().0)
+		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
+			(std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
+		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
+			(std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
+		} else { panic!("Failed to bind to v4 localhost on common ports"); }
+	}
+
 	async fn do_basic_connection_test() {
 		let secp_ctx = Secp256k1::new();
 		let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
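`make_tcp_connection` above probes a fixed list of ports (including 9735, the conventional Lightning port) and panics only if all six are taken. If collisions between concurrently running tests ever become a problem, a common alternative is to bind port 0 and let the OS assign a free ephemeral port. A hypothetical helper, not part of this patch:

```rust
use std::net::{TcpListener, TcpStream};

// Bind port 0 so the OS picks any free ephemeral port, then connect to the
// address that was actually bound. Avoids collisions between parallel tests.
fn make_tcp_connection_ephemeral() -> (TcpStream, TcpStream) {
    let listener = TcpListener::bind("127.0.0.1:0").expect("failed to bind to loopback");
    let addr = listener.local_addr().unwrap();
    (TcpStream::connect(addr).unwrap(), listener.accept().unwrap().0)
}

fn main() {
    let (a, b) = make_tcp_connection_ephemeral();
    println!("{} <-> {}", a.local_addr().unwrap(), b.local_addr().unwrap());
}
```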
@@ -575,7 +662,8 @@ mod tests {
 		let a_manager = Arc::new(PeerManager::new(MessageHandler {
 			chan_handler: Arc::clone(&a_handler),
 			route_handler: Arc::clone(&a_handler),
-		}, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, a_key.clone(), 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

 		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
 		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
@@ -589,19 +677,14 @@ mod tests {
 		let b_manager = Arc::new(PeerManager::new(MessageHandler {
 			chan_handler: Arc::clone(&b_handler),
 			route_handler: Arc::clone(&b_handler),
-		}, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, b_key.clone(), 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

 		// We bind on localhost, hoping the environment is properly configured with a local
 		// address. This may not always be the case in containers and the like, so if this test is
 		// failing for you check that you have a loopback interface and it is configured with
 		// 127.0.0.1.
-		let (conn_a, conn_b) = if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
-			(std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
-		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
-			(std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
-		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
-			(std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
-		} else { panic!("Failed to bind to v4 localhost on common ports"); };
+		let (conn_a, conn_b) = make_tcp_connection();

 		let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
 		let fut_b = super::setup_inbound(b_manager, conn_b);
@@ -629,8 +712,54 @@ mod tests {
 	async fn basic_threaded_connection_test() {
 		do_basic_connection_test().await;
 	}
+
 	#[tokio::test]
 	async fn basic_unthreaded_connection_test() {
 		do_basic_connection_test().await;
 	}
+
+	async fn race_disconnect_accept() {
+		// Previously, if we handed an already-disconnected socket to `setup_inbound` we'd panic.
+		// This attempts to find other similar races by opening connections and shutting them down
+		// while connecting. Sadly in testing this did *not* reproduce the previous issue.
+		let secp_ctx = Secp256k1::new();
+		let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
+		let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
+		let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
+
+		let a_manager = Arc::new(PeerManager::new(MessageHandler {
+			chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
+			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+			route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, a_key, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

+		// Make two connections, one for an inbound and one for an outbound connection
+		let conn_a = {
+			let (conn_a, _) = make_tcp_connection();
+			conn_a
+		};
+		let conn_b = {
+			let (_, conn_b) = make_tcp_connection();
+			conn_b
+		};
+
+		// Call connection setup inside new tokio tasks.
+		let manager_reference = Arc::clone(&a_manager);
+		tokio::spawn(async move {
+			super::setup_inbound(manager_reference, conn_a).await
+		});
+		tokio::spawn(async move {
+			super::setup_outbound(a_manager, b_pub, conn_b).await
+		});
+	}
+
+	#[tokio::test(flavor = "multi_thread")]
+	async fn threaded_race_disconnect_accept() {
+		race_disconnect_accept().await;
+	}
+
+	#[tokio::test]
+	async fn unthreaded_race_disconnect_accept() {
+		race_disconnect_accept().await;
+	}
 }
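The race test above exercises setup on live sockets that may die mid-handshake; the specific regression its comment mentions (an already-disconnected socket handed to `setup_inbound`) can also be staged deterministically. A self-contained sketch of that precondition using only std (the final handoff is shown as a comment since it needs a fully constructed `PeerManager`):

```rust
use std::net::{Shutdown, TcpListener, TcpStream};

fn main() {
    // Reproduce the precondition the race test aims at: a socket that is
    // already disconnected by the time the peer-setup code receives it.
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap();
    let conn = TcpStream::connect(addr).unwrap();
    let (accepted, _) = listener.accept().unwrap();

    // Kill the connection before any handler ever sees it...
    conn.shutdown(Shutdown::Both).unwrap();
    drop(conn);

    // ...then hand the dead socket off. Robust setup code (like the fixed
    // setup_inbound) must treat this as an immediate disconnect, not panic:
    // tokio::spawn(lightning_net_tokio::setup_inbound(peer_manager, accepted));
    let _ = accepted;
}
```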