X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-net-tokio%2Fsrc%2Flib.rs;h=5527d85adf6f819e0b02e3ad05e18cc7c4426ae6;hb=b1d3aa86a455bce9321ac70560fbf549a8e43a51;hp=724a57fb15c19bb1ba9c9c09bd3fbe44fc3a6478;hpb=99985c620962fd35163ee8b585c3a7edb6491db7;p=rust-lightning

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 724a57fb..5527d85a 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -32,31 +32,93 @@ use bitcoin::secp256k1::PublicKey;
 
 use tokio::net::TcpStream;
-use tokio::{io, time};
+use tokio::time;
 use tokio::sync::mpsc;
-use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::APeerManager;
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
 
 use std::ops::Deref;
-use std::task;
+use std::task::{self, Poll};
+use std::future::Future;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::Duration;
+use std::pin::Pin;
 use std::hash::Hash;
 
 static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 
+// We only need to select over multiple futures in one place, and taking on the full `tokio/macros`
+// dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we
+// define trivial two- and three-way select futures with the specific types we need and just use those.
+pub(crate) enum SelectorOutput {
+	A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
+}
+
+pub(crate) struct TwoSelector<
+	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> {
+	pub a: A,
+	pub b: B,
+}
+
+impl<
+	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> Future for TwoSelector<A, B> {
+	type Output = SelectorOutput;
+	fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+		match Pin::new(&mut self.a).poll(ctx) {
+			Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+			Poll::Pending => {},
+		}
+		match Pin::new(&mut self.b).poll(ctx) {
+			Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+			Poll::Pending => {},
+		}
+		Poll::Pending
+	}
+}
+
+pub(crate) struct ThreeSelector<
+	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
+> {
+	pub a: A,
+	pub b: B,
+	pub c: C,
+}
+
+impl<
+	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
+> Future for ThreeSelector<A, B, C> {
+	type Output = SelectorOutput;
+	fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+		match Pin::new(&mut self.a).poll(ctx) {
+			Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+			Poll::Pending => {},
+		}
+		match Pin::new(&mut self.b).poll(ctx) {
+			Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+			Poll::Pending => {},
+		}
+		match Pin::new(&mut self.c).poll(ctx) {
+			Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
+			Poll::Pending => {},
+		}
+		Poll::Pending
+	}
+}
+
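The selectors above implement a biased select: each poll tries `a`, then `b` (then `c`), and the first ready future wins. Purely as an illustration (not part of the patch), the three-way case behaves much like the following `tokio::select!` form, which would require the `tokio/macros` feature this change deliberately avoids; the free function and its parameters are made up for the sketch:

    async fn three_way_select(
    	write_avail_receiver: &mut tokio::sync::mpsc::Receiver<()>,
    	read_wake_receiver: &mut tokio::sync::mpsc::Receiver<()>,
    	reader: &tokio::net::TcpStream,
    ) -> SelectorOutput {
    	tokio::select! {
    		// `biased` makes tokio poll the arms top-to-bottom, matching the
    		// fixed a-then-b-then-c polling order of ThreeSelector above.
    		biased;
    		v = write_avail_receiver.recv() => SelectorOutput::A(v),
    		v = read_wake_receiver.recv() => SelectorOutput::B(v),
    		r = reader.readable() => SelectorOutput::C(r),
    	}
    }
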
 /// Connection contains all our internal state for a connection - we hold a reference to the
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
 struct Connection {
-	writer: Option<io::WriteHalf<TcpStream>>,
+	writer: Option<Arc<TcpStream>>,
 	// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
 	// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
 	// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
@@ -93,7 +155,7 @@ impl Connection {
 	async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
 		peer_manager: PM,
 		us: Arc<Mutex<Self>>,
-		mut reader: io::ReadHalf<TcpStream>,
+		reader: Arc<TcpStream>,
 		mut read_wake_receiver: mpsc::Receiver<()>,
 		mut write_avail_receiver: mpsc::Receiver<()>,
 	) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
@@ -127,29 +189,49 @@ impl Connection {
 			}
 			us_lock.read_paused
 		};
-		tokio::select! {
-			v = write_avail_receiver.recv() => {
+		// TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support.
+		let select_result = if read_paused {
+			TwoSelector {
+				a: Box::pin(write_avail_receiver.recv()),
+				b: Box::pin(read_wake_receiver.recv()),
+			}.await
+		} else {
+			ThreeSelector {
+				a: Box::pin(write_avail_receiver.recv()),
+				b: Box::pin(read_wake_receiver.recv()),
+				c: Box::pin(reader.readable()),
+			}.await
+		};
+		match select_result {
+			SelectorOutput::A(v) => {
 				assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
 				if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
 					break Disconnect::CloseConnection;
 				}
 			},
-			_ = read_wake_receiver.recv() => {},
-			read = reader.read(&mut buf), if !read_paused => match read {
-				Ok(0) => break Disconnect::PeerDisconnected,
-				Ok(len) => {
-					let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
-					let mut us_lock = us.lock().unwrap();
-					match read_res {
-						Ok(pause_read) => {
-							if pause_read {
-								us_lock.read_paused = true;
-							}
-						},
-						Err(_) => break Disconnect::CloseConnection,
-					}
-				},
-				Err(_) => break Disconnect::PeerDisconnected,
+			SelectorOutput::B(_) => {},
+			SelectorOutput::C(res) => {
+				if res.is_err() { break Disconnect::PeerDisconnected; }
+				match reader.try_read(&mut buf) {
+					Ok(0) => break Disconnect::PeerDisconnected,
+					Ok(len) => {
+						let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
+						let mut us_lock = us.lock().unwrap();
+						match read_res {
+							Ok(pause_read) => {
+								if pause_read {
+									us_lock.read_paused = true;
+								}
+							},
+							Err(_) => break Disconnect::CloseConnection,
+						}
+					},
+					Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+						// readable() is allowed to spuriously wake, so we have to handle
+						// WouldBlock here.
+					},
+					Err(_) => break Disconnect::PeerDisconnected,
+				}
 			},
 		}
 		let _ = event_waker.try_send(());
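The read loop above swaps `AsyncReadExt::read` on an owned `ReadHalf` for tokio's readiness API: await `readable()`, then attempt a non-blocking `try_read()`. Since readiness may be reported spuriously, a `WouldBlock` result has to mean "wait again" rather than "peer disconnected". A minimal standalone sketch of that pattern (illustrative only, not from the patch):

    use tokio::net::TcpStream;

    /// Reads once from `stream`, returning Ok(None) on EOF. Illustrative only.
    async fn read_once(stream: &TcpStream, buf: &mut [u8]) -> std::io::Result<Option<usize>> {
    	loop {
    		// Wait until tokio believes the socket is readable...
    		stream.readable().await?;
    		// ...then try a non-blocking read. readable() can wake spuriously,
    		// so WouldBlock just means "wait again", not "peer went away".
    		match stream.try_read(buf) {
    			Ok(0) => return Ok(None),
    			Ok(len) => return Ok(Some(len)),
    			Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
    			Err(e) => return Err(e),
    		}
    	}
    }
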
@@ -161,18 +243,14 @@ impl Connection {
 			// here.
 			let _ = tokio::task::yield_now().await;
 		};
-		let writer_option = us.lock().unwrap().writer.take();
-		if let Some(mut writer) = writer_option {
-			// If the socket is already closed, shutdown() will fail, so just ignore it.
-			let _ = writer.shutdown().await;
-		}
+		us.lock().unwrap().writer.take();
 		if let Disconnect::PeerDisconnected = disconnect_type {
 			peer_manager.as_ref().socket_disconnected(&our_descriptor);
 			peer_manager.as_ref().process_events();
 		}
 	}
 
-	fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+	fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
 		// We only ever need a channel of depth 1 here: if we returned a non-full write to the
 		// PeerManager, we will eventually get notified that there is room in the socket to write
 		// new bytes, which will generate an event. That event will be popped off the queue before
@@ -184,24 +262,24 @@ impl Connection {
 		// false.
 		let (read_waker, read_receiver) = mpsc::channel(1);
 		stream.set_nonblocking(true).unwrap();
-		let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
+		let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap());
 
-		(reader, write_receiver, read_receiver,
+		(Arc::clone(&tokio_stream), write_receiver, read_receiver,
 			Arc::new(Mutex::new(Self {
-				writer: Some(writer), write_avail, read_waker, read_paused: false,
+				writer: Some(tokio_stream), write_avail, read_waker, read_paused: false,
 				rl_requested_disconnect: false,
 				id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
 			})))
 	}
 }
 
-fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
 	match stream.peer_addr() {
-		Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+		Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
 			addr: sockaddr.ip().octets(),
 			port: sockaddr.port(),
 		}),
-		Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+		Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
 			addr: sockaddr.ip().octets(),
 			port: sockaddr.port(),
 		}),
@@ -384,9 +462,9 @@ impl SocketDescriptor {
 }
 impl peer_handler::SocketDescriptor for SocketDescriptor {
 	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
-		// To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
-		// writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
-		// a SocketDescriptor in it which can wake up the write_avail Sender, waking up the
+		// To send data, we take a lock on our Connection to access the TcpStream, writing to it if
+		// there's room in the kernel buffer, or otherwise create a new Waker with a
+		// SocketDescriptor in it which can wake up the write_avail Sender, waking up the
 		// processing future which will call write_buffer_space_avail and we'll end up back here.
 		let mut us = self.conn.lock().unwrap();
 		if us.writer.is_none() {
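One design point worth calling out in `Connection::new` above: rather than `io::split()` producing owned `ReadHalf`/`WriteHalf` objects, a single `Arc<TcpStream>` is now cloned into both the read future and the `writer` slot. This works because the readiness-based calls used on each side (`readable()`/`try_read()` and `poll_write_ready()`/`try_write()`) all take `&TcpStream`, so no split is required. A tiny sketch of the idea (names are illustrative):

    use std::sync::Arc;
    use tokio::net::TcpStream;

    // One Arc serves both directions: the read future calls readable()/try_read(),
    // while the write path calls poll_write_ready()/try_write(). All of these take
    // &TcpStream, so the owned read/write halves from io::split() are unnecessary.
    fn share_stream(stream: TcpStream) -> (Arc<TcpStream>, Arc<TcpStream>) {
    	let read_half = Arc::new(stream);
    	let write_half = Arc::clone(&read_half);
    	(read_half, write_half)
    }
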
@@ -406,24 +484,18 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
 		let mut ctx = task::Context::from_waker(&waker);
 		let mut written_len = 0;
 		loop {
-			match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
-				task::Poll::Ready(Ok(res)) => {
-					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
-					// know how to handle it if it does (cause it should be a Poll::Pending
-					// instead):
-					assert_ne!(res, 0);
-					written_len += res;
-					if written_len == data.len() { return written_len; }
-				},
-				task::Poll::Ready(Err(e)) => {
-					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
-					// know how to handle it if it does (cause it should be a Poll::Pending
-					// instead):
-					assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
-					// Probably we've already been closed, just return what we have and let the
-					// read thread handle closing logic.
-					return written_len;
+			match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) {
+				task::Poll::Ready(Ok(())) => {
+					match us.writer.as_ref().unwrap().try_write(&data[written_len..]) {
+						Ok(res) => {
+							debug_assert_ne!(res, 0);
+							written_len += res;
+							if written_len == data.len() { return written_len; }
+						},
+						Err(_) => return written_len,
+					}
 				},
+				task::Poll::Ready(Err(_)) => return written_len,
 				task::Poll::Pending => {
 					// We're queued up for a write event now, but we need to make sure we also
 					// pause read given we're now waiting on the remote end to ACK (and in
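The rewritten `send_data` path mirrors the read side: poll for write readiness with a hand-rolled `Waker` (so a full kernel buffer re-arms the `write_avail` wakeup), then `try_write()`. A standalone sketch of the same loop, keeping the patch's policy of returning the bytes written so far on any error and leaving disconnect detection to the read future (names are illustrative):

    use std::task::{Context, Poll, Waker};
    use tokio::net::TcpStream;

    /// Illustrative only: synchronous best-effort write driven by an explicit Waker.
    fn try_send(stream: &TcpStream, data: &[u8], waker: &Waker) -> usize {
    	let mut ctx = Context::from_waker(waker);
    	let mut written = 0;
    	while written < data.len() {
    		match stream.poll_write_ready(&mut ctx) {
    			// Socket reports writable: attempt a non-blocking write.
    			Poll::Ready(Ok(())) => match stream.try_write(&data[written..]) {
    				Ok(n) => written += n,
    				// Matches the patch: on any error (including a spurious
    				// WouldBlock) return what we have; the read side will see
    				// the disconnect if the socket actually died.
    				Err(_) => return written,
    			},
    			Poll::Ready(Err(_)) => return written,
    			// Pending: `waker` is now registered, so the caller gets woken
    			// (via write_avail in the real code) once there is room again.
    			Poll::Pending => return written,
    		}
    	}
    	written
    }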