X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-net-tokio%2Fsrc%2Flib.rs;h=2ac10762b04eaf23f602bc3d8dcc987438542cae;hb=ab20284e2673e946ebdffcd5be7388de917a168f;hp=a9fd861bc846e440fd8ff54ab31deedf622aa8e6;hpb=7671ae54522197dbca495c4c7b3fec5c814a1841;p=rust-lightning

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index a9fd861b..2ac10762 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -23,7 +23,7 @@
 //! # Example
 //! ```
 //! use std::net::TcpStream;
-//! use bitcoin::secp256k1::key::PublicKey;
+//! use bitcoin::secp256k1::PublicKey;
 //! use lightning::util::events::{Event, EventHandler, EventsProvider};
 //! use std::net::SocketAddr;
 //! use std::sync::Arc;
@@ -71,7 +71,7 @@
 
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 
 use tokio::net::TcpStream;
 use tokio::{io, time};
@@ -85,7 +85,6 @@ use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddre
 use lightning::util::logger::Logger;
 
 use std::task;
-use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
@@ -121,11 +120,28 @@ struct Connection {
 	id: u64,
 }
 impl Connection {
+	async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
+			CMH: ChannelMessageHandler + 'static + Send + Sync,
+			RMH: RoutingMessageHandler + 'static + Send + Sync,
+			L: Logger + 'static + ?Sized + Send + Sync,
+			UMH: CustomMessageHandler + 'static + Send + Sync {
+		loop {
+			if event_receiver.recv().await.is_none() {
+				return;
+			}
+			peer_manager.process_events();
+		}
+	}
+
 	async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
-			CMH: ChannelMessageHandler + 'static,
-			RMH: RoutingMessageHandler + 'static,
-			L: Logger + 'static + ?Sized,
-			UMH: CustomMessageHandler + 'static {
+			CMH: ChannelMessageHandler + 'static + Send + Sync,
+			RMH: RoutingMessageHandler + 'static + Send + Sync,
+			L: Logger + 'static + ?Sized + Send + Sync,
+			UMH: CustomMessageHandler + 'static + Send + Sync {
+		// Create a waker to wake up poll_event_process, above
+		let (event_waker, event_receiver) = mpsc::channel(1);
+		tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+
 		// 8KB is nice and big but also should never cause any issues with stack overflowing.
 		let mut buf = [0; 8192];
 
@@ -176,7 +192,14 @@ impl Connection {
 					Err(_) => break Disconnect::PeerDisconnected,
 				},
 			}
-			peer_manager.process_events();
+			let _ = event_waker.try_send(());
+
+			// At this point we've processed a message or two, and reset the ping timer for this
+			// peer, at least in the "are we still receiving messages" context; if we don't give up
+			// our timeslice to another task we may just spin on this peer, starving other peers
+			// and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
+			// here.
+			tokio::task::yield_now().await;
 		};
 		let writer_option = us.lock().unwrap().writer.take();
 		if let Some(mut writer) = writer_option {
@@ -212,6 +235,20 @@ impl Connection {
 	}
 }
 
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+	match stream.peer_addr() {
+		Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+			addr: sockaddr.ip().octets(),
+			port: sockaddr.port(),
+		}),
+		Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+			addr: sockaddr.ip().octets(),
+			port: sockaddr.port(),
+		}),
+		Err(_) => None,
+	}
+}
+
 /// Process incoming messages and feed outgoing messages on the provided socket generated by
 /// accepting an incoming connection.
 ///
@@ -223,21 +260,12 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManag
 		RMH: RoutingMessageHandler + 'static + Send + Sync,
 		L: Logger + 'static + ?Sized + Send + Sync,
 		UMH: CustomMessageHandler + 'static + Send + Sync {
-	let ip_addr = stream.peer_addr().unwrap();
+	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(debug_assertions)]
 	let last_us = Arc::clone(&us);
 
-	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-		IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-		IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-	}) {
+	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
 	} else {
 		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -274,20 +302,11 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerMana
 		RMH: RoutingMessageHandler + 'static + Send + Sync,
 		L: Logger + 'static + ?Sized + Send + Sync,
 		UMH: CustomMessageHandler + 'static + Send + Sync {
-	let ip_addr = stream.peer_addr().unwrap();
+	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(debug_assertions)]
 	let last_us = Arc::clone(&us);
-	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-		IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-		IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-	}) {
+	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(async move {
 			// We should essentially always have enough room in a TCP socket buffer to send the
 			// initial 10s of bytes. However, tokio running in single-threaded mode will always
@@ -448,6 +467,9 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
 				// pause read given we're now waiting on the remote end to ACK (and in
 				// accordance with the send_data() docs).
 				us.read_paused = true;
+				// Further, to avoid any current pending read causing a `read_event` call, wake
+				// up the read_waker and restart its loop.
+ let _ = us.read_waker.try_send(()); return written_len; }, } @@ -527,7 +549,7 @@ mod tests { fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &AcceptChannel) {} fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {} fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {} - fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {} + fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {} fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {} fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {} fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {} @@ -561,6 +583,22 @@ mod tests { } } + fn make_tcp_connection() -> (std::net::TcpStream, std::net::TcpStream) { + if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") { + (std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0) + } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:19735") { + (std::net::TcpStream::connect("127.0.0.1:19735").unwrap(), listener.accept().unwrap().0) + } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9997") { + (std::net::TcpStream::connect("127.0.0.1:9997").unwrap(), listener.accept().unwrap().0) + } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9998") { + (std::net::TcpStream::connect("127.0.0.1:9998").unwrap(), listener.accept().unwrap().0) + } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") { + (std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0) + } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") { + (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0) + } else { panic!("Failed to bind to v4 localhost on common ports"); } + } + async fn do_basic_connection_test() { let secp_ctx = Secp256k1::new(); let a_key = SecretKey::from_slice(&[1; 32]).unwrap(); @@ -600,13 +638,7 @@ mod tests { // address. This may not always be the case in containers and the like, so if this test is // failing for you check that you have a loopback interface and it is configured with // 127.0.0.1. - let (conn_a, conn_b) = if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") { - (std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0) - } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") { - (std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0) - } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") { - (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0) - } else { panic!("Failed to bind to v4 localhost on common ports"); }; + let (conn_a, conn_b) = make_tcp_connection(); let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a); let fut_b = super::setup_inbound(b_manager, conn_b); @@ -634,8 +666,53 @@ mod tests { async fn basic_threaded_connection_test() { do_basic_connection_test().await; } + #[tokio::test] async fn basic_unthreaded_connection_test() { do_basic_connection_test().await; } + + async fn race_disconnect_accept() { + // Previously, if we handed an already-disconnected socket to `setup_inbound` we'd panic. 
+ // This attempts to find other similar races by opening connections and shutting them down + // while connecting. Sadly in testing this did *not* reproduce the previous issue. + let secp_ctx = Secp256k1::new(); + let a_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let b_key = SecretKey::from_slice(&[2; 32]).unwrap(); + let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key); + + let a_manager = Arc::new(PeerManager::new(MessageHandler { + chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()), + route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), + }, a_key, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); + + // Make two connections, one for an inbound and one for an outbound connection + let conn_a = { + let (conn_a, _) = make_tcp_connection(); + conn_a + }; + let conn_b = { + let (_, conn_b) = make_tcp_connection(); + conn_b + }; + + // Call connection setup inside new tokio tasks. + let manager_reference = Arc::clone(&a_manager); + tokio::spawn(async move { + super::setup_inbound(manager_reference, conn_a).await + }); + tokio::spawn(async move { + super::setup_outbound(a_manager, b_pub, conn_b).await + }); + } + + #[tokio::test(flavor = "multi_thread")] + async fn threaded_race_disconnect_accept() { + race_disconnect_accept().await; + } + + #[tokio::test] + async fn unthreaded_race_disconnect_accept() { + race_disconnect_accept().await; + } }
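For reference, the event-processing change above reduces to a small pattern: schedule_read no longer calls process_events() inline, it pokes a capacity-1 tokio mpsc channel with try_send(()) so a separately spawned task (poll_event_process) runs process_events(), and then yields its timeslice so one busy peer cannot starve the others. Below is a minimal, self-contained sketch of that wake-up pattern, not part of the patch; the names (event_processor, wake_tx) and the println! stand-in for PeerManager::process_events() are illustrative only.

use tokio::sync::mpsc;

// Stand-in for PeerManager::process_events(): work that should run "soon after"
// a message was handled, but never inside the per-connection read loop itself.
async fn event_processor(mut wake_rx: mpsc::Receiver<()>) {
    // Exits once every sender (i.e. every connection task) has been dropped.
    while wake_rx.recv().await.is_some() {
        println!("processing events");
    }
}

#[tokio::main]
async fn main() {
    // Depth 1 is enough: one queued () already means "run at least once more".
    let (wake_tx, wake_rx) = mpsc::channel(1);
    let processor = tokio::spawn(event_processor(wake_rx));

    for _ in 0..10 {
        // Non-blocking; if a wake-up is already queued this one can be dropped,
        // mirroring `let _ = event_waker.try_send(());` in schedule_read above.
        let _ = wake_tx.try_send(());
        // Give the processor (and other peers' tasks) a timeslice, mirroring the
        // explicit tokio::task::yield_now().await added to the read loop.
        tokio::task::yield_now().await;
    }

    drop(wake_tx);
    processor.await.unwrap();
}

The bounded depth-1 channel plus try_send coalesces any number of wake-ups into a single pending notification, so the processing task runs at most once per wake-up burst and the read loop never blocks on it.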