Remove a handful of redundant imports and drop the `tokio/macros` dependency by hand-rolling the one `select!` we need
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 724a57fb15c19bb1ba9c9c09bd3fbe44fc3a6478..41691d9a0798b87133b90d4d7cd7b713349bafed 100644
@@ -22,9 +22,8 @@
 //!
 //! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 use bitcoin::secp256k1::PublicKey;
 
 use tokio::net::TcpStream;
-use tokio::{io, time};
+use tokio::time;
 use tokio::sync::mpsc;
-use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::APeerManager;
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
 
 use std::ops::Deref;
-use std::task;
+use std::task::{self, Poll};
+use std::future::Future;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::Duration;
+use std::pin::Pin;
 use std::hash::Hash;
 
 static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 
+// We only need to select over multiple futures in one place, and taking on the full `tokio/macros`
+// dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we
+// define trivial two- and three-future selectors with the specific output types we need and use those.
+
+pub(crate) enum SelectorOutput {
+       A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
+}
+
+pub(crate) struct TwoSelector<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> {
+       pub a: A,
+       pub b: B,
+}
+
+impl<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> Future for TwoSelector<A, B> {
+       type Output = SelectorOutput;
+       fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+               match Pin::new(&mut self.a).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+                       Poll::Pending => {},
+               }
+               match Pin::new(&mut self.b).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+                       Poll::Pending => {},
+               }
+               Poll::Pending
+       }
+}
+
+pub(crate) struct ThreeSelector<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
+> {
+       pub a: A,
+       pub b: B,
+       pub c: C,
+}
+
+impl<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
+> Future for ThreeSelector<A, B, C> {
+       type Output = SelectorOutput;
+       fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+               match Pin::new(&mut self.a).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+                       Poll::Pending => {},
+               }
+               match Pin::new(&mut self.b).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+                       Poll::Pending => {},
+               }
+               match Pin::new(&mut self.c).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
+                       Poll::Pending => {},
+               }
+               Poll::Pending
+       }
+}
+
 /// Connection contains all our internal state for a connection - we hold a reference to the
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
 struct Connection {
-       writer: Option<io::WriteHalf<TcpStream>>,
+       writer: Option<Arc<TcpStream>>,
        // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
        // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
        // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
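
`TwoSelector` and `ThreeSelector` are plain hand-written futures, so no macro support is needed to drive them. A minimal sketch of using `TwoSelector` directly, assuming the definitions above are in scope and tokio is built with the `rt` and `sync` features (the channel setup and assertions here are illustrative only):

    use tokio::sync::mpsc;

    fn main() {
        // Build the runtime by hand; no `#[tokio::main]`, hence no `tokio/macros`.
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();
        rt.block_on(async {
            let (tx_a, mut rx_a) = mpsc::channel::<()>(1);
            let (_tx_b, mut rx_b) = mpsc::channel::<()>(1);
            tx_a.try_send(()).unwrap();
            // `Box::pin` yields `Unpin` futures with `Output = Option<()>`,
            // satisfying the bounds on `TwoSelector`.
            let out = TwoSelector {
                a: Box::pin(rx_a.recv()),
                b: Box::pin(rx_b.recv()),
            }.await;
            match out {
                SelectorOutput::A(v) => assert_eq!(v, Some(())),
                _ => unreachable!("`a` is polled first and was already ready"),
            }
        });
    }

Note the fixed polling order: `a` is always polled before `b` (and `c`), which is acceptable here because every branch is re-checked on each wakeup.
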
@@ -93,7 +154,7 @@ impl Connection {
        async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
                peer_manager: PM,
                us: Arc<Mutex<Self>>,
-               mut reader: io::ReadHalf<TcpStream>,
+               reader: Arc<TcpStream>,
                mut read_wake_receiver: mpsc::Receiver<()>,
                mut write_avail_receiver: mpsc::Receiver<()>,
        ) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
@@ -127,29 +188,49 @@ impl Connection {
                                }
                                us_lock.read_paused
                        };
-                       tokio::select! {
-                               v = write_avail_receiver.recv() => {
+                       // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support.
+                       let select_result = if read_paused {
+                               TwoSelector {
+                                       a: Box::pin(write_avail_receiver.recv()),
+                                       b: Box::pin(read_wake_receiver.recv()),
+                               }.await
+                       } else {
+                               ThreeSelector {
+                                       a: Box::pin(write_avail_receiver.recv()),
+                                       b: Box::pin(read_wake_receiver.recv()),
+                                       c: Box::pin(reader.readable()),
+                               }.await
+                       };
+                       match select_result {
+                               SelectorOutput::A(v) => {
                                        assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
                                        if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
                                                break Disconnect::CloseConnection;
                                        }
                                },
-                               _ = read_wake_receiver.recv() => {},
-                               read = reader.read(&mut buf), if !read_paused => match read {
-                                       Ok(0) => break Disconnect::PeerDisconnected,
-                                       Ok(len) => {
-                                               let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
-                                               let mut us_lock = us.lock().unwrap();
-                                               match read_res {
-                                                       Ok(pause_read) => {
-                                                               if pause_read {
-                                                                       us_lock.read_paused = true;
-                                                               }
-                                                       },
-                                                       Err(_) => break Disconnect::CloseConnection,
-                                               }
-                                       },
-                                       Err(_) => break Disconnect::PeerDisconnected,
+                               SelectorOutput::B(_) => {},
+                               SelectorOutput::C(res) => {
+                                       if res.is_err() { break Disconnect::PeerDisconnected; }
+                                       match reader.try_read(&mut buf) {
+                                               Ok(0) => break Disconnect::PeerDisconnected,
+                                               Ok(len) => {
+                                                       let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
+                                                       let mut us_lock = us.lock().unwrap();
+                                                       match read_res {
+                                                               Ok(pause_read) => {
+                                                                       if pause_read {
+                                                                               us_lock.read_paused = true;
+                                                                       }
+                                                               },
+                                                               Err(_) => break Disconnect::CloseConnection,
+                                                       }
+                                               },
+                                               Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+                                                       // readable() is allowed to spuriously wake, so we have to handle
+                                                       // WouldBlock here.
+                                               },
+                                               Err(_) => break Disconnect::PeerDisconnected,
+                                       }
                                },
                        }
                        let _ = event_waker.try_send(());
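
Since `readable()` may return even when no data is actually available, each wakeup is paired with a `try_read` that treats `WouldBlock` as "go back to waiting". A standalone sketch of that readiness-then-try pattern on a shared `TcpStream` (the helper name and buffer size are illustrative, not from the patch):

    use std::sync::Arc;
    use tokio::net::TcpStream;

    /// Reads whatever is currently available; `Ok(None)` means the peer closed.
    async fn read_once(stream: &Arc<TcpStream>) -> std::io::Result<Option<Vec<u8>>> {
        let mut buf = [0u8; 4096];
        loop {
            // Wait until the socket claims to be readable...
            stream.readable().await?;
            // ...then confirm with a non-blocking read.
            match stream.try_read(&mut buf) {
                Ok(0) => return Ok(None),
                Ok(len) => return Ok(Some(buf[..len].to_vec())),
                // Spurious wakeup: loop and wait again.
                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
                Err(e) => return Err(e),
            }
        }
    }
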
@@ -161,18 +242,14 @@ impl Connection {
                        // here.
                        let _ = tokio::task::yield_now().await;
                };
-               let writer_option = us.lock().unwrap().writer.take();
-               if let Some(mut writer) = writer_option {
-                       // If the socket is already closed, shutdown() will fail, so just ignore it.
-                       let _ = writer.shutdown().await;
-               }
+               us.lock().unwrap().writer.take();
                if let Disconnect::PeerDisconnected = disconnect_type {
                        peer_manager.as_ref().socket_disconnected(&our_descriptor);
                        peer_manager.as_ref().process_events();
                }
        }
 
-       fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+       fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
                // We only ever need a channel of depth 1 here: if we returned a non-full write to the
                // PeerManager, we will eventually get notified that there is room in the socket to write
                // new bytes, which will generate an event. That event will be popped off the queue before
@@ -184,24 +261,24 @@ impl Connection {
                // false.
                let (read_waker, read_receiver) = mpsc::channel(1);
                stream.set_nonblocking(true).unwrap();
-               let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
+               let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap());
 
-               (reader, write_receiver, read_receiver,
+               (Arc::clone(&tokio_stream), write_receiver, read_receiver,
                Arc::new(Mutex::new(Self {
-                       writer: Some(writer), write_avail, read_waker, read_paused: false,
+                       writer: Some(tokio_stream), write_avail, read_waker, read_paused: false,
                        rl_requested_disconnect: false,
                        id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
                })))
        }
 }
 
-fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
        match stream.peer_addr() {
-               Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+               Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
                        addr: sockaddr.ip().octets(),
                        port: sockaddr.port(),
                }),
-               Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+               Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
                        addr: sockaddr.ip().octets(),
                        port: sockaddr.port(),
                }),
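
The depth-1 channels set up in `new` rely on wakes being idempotent: if a notification is already queued, a second `try_send` fails harmlessly and the extra wake is simply dropped. A small standalone check of that property (assumes a tokio version that provides `Receiver::try_recv`):

    use tokio::sync::mpsc;

    fn main() {
        let (tx, mut rx) = mpsc::channel::<()>(1);
        tx.try_send(()).unwrap();          // first wake queues
        assert!(tx.try_send(()).is_err()); // second wake is dropped, channel full
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();
        rt.block_on(async {
            assert_eq!(rx.recv().await, Some(())); // exactly one wake delivered
            assert!(rx.try_recv().is_err());
        });
    }
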
@@ -344,7 +421,11 @@ const SOCK_WAKER_VTABLE: task::RawWakerVTable =
        task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);
 
 fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
-       write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
+       let new_waker = unsafe { Arc::from_raw(orig_ptr as *const mpsc::Sender<()>) };
+       let res = write_avail_to_waker(&new_waker);
+       // Avoid decrementing the refcount: turn `new_waker` back into a raw pointer via `into_raw`
+       // rather than letting it drop.
+       let _ = Arc::into_raw(new_waker);
+       res
 }
 // When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
 // failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
@@ -357,16 +438,15 @@ fn wake_socket_waker(orig_ptr: *const ()) {
 }
 fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
        let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
-       let sender = unsafe { (*sender_ptr).clone() };
+       let sender = unsafe { &*sender_ptr };
        let _ = sender.try_send(());
 }
 fn drop_socket_waker(orig_ptr: *const ()) {
-       let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
-       // _orig_box is now dropped
+       let _orig_arc = unsafe { Arc::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
+       // _orig_arc is now dropped
 }
-fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
-       let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
-       let new_ptr = new_box as *const mpsc::Sender<()>;
+fn write_avail_to_waker(sender: &Arc<mpsc::Sender<()>>) -> task::RawWaker {
+       let new_ptr = Arc::into_raw(Arc::clone(&sender));
        task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
 }
 
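These vtable functions implement manual `Arc` reference counting through raw pointers: clone bumps the count via a `from_raw`/`into_raw` round trip, drop releases one reference, and waking by value consumes one. The same pattern in self-contained form, with a plain payload type standing in for the patch's `mpsc::Sender` (all names here are illustrative):

    use std::sync::Arc;
    use std::task::{RawWaker, RawWakerVTable, Waker};

    fn clone_waker(ptr: *const ()) -> RawWaker {
        // Rebuild the Arc, clone it (refcount +1), then leak the original
        // again so the caller's reference stays alive.
        let arc = unsafe { Arc::from_raw(ptr as *const String) };
        let cloned_ptr = Arc::into_raw(Arc::clone(&arc));
        let _ = Arc::into_raw(arc);
        RawWaker::new(cloned_ptr as *const (), &VTABLE)
    }
    fn wake(ptr: *const ()) {
        // Nothing to notify for a plain String payload; waking by value
        // still consumes the reference we were given.
        drop_waker(ptr);
    }
    fn wake_by_ref(_ptr: *const ()) {}
    fn drop_waker(ptr: *const ()) {
        // Rebuild and drop the Arc, releasing one reference.
        unsafe { drop(Arc::from_raw(ptr as *const String)); }
    }
    static VTABLE: RawWakerVTable =
        RawWakerVTable::new(clone_waker, wake, wake_by_ref, drop_waker);

    fn main() {
        let payload = Arc::new(String::from("write_avail"));
        let raw = RawWaker::new(Arc::into_raw(payload) as *const (), &VTABLE);
        let waker = unsafe { Waker::from_raw(raw) };
        let waker2 = waker.clone(); // refcount 2
        drop(waker);                // refcount 1
        waker2.wake();              // refcount 0, payload freed
    }
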
@@ -374,19 +454,27 @@ fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
 /// type in the template of PeerHandler.
 pub struct SocketDescriptor {
        conn: Arc<Mutex<Connection>>,
+       // We store, in an Arc, a copy of the mpsc::Sender used to wake the read task. While we
+       // could simply clone the sender and store a copy in each waker, that would require an
+       // allocation per waker. Instead, we `Arc::clone` this one reference and store the
+       // resulting pointer in the waker.
+       write_avail_sender: Arc<mpsc::Sender<()>>,
        id: u64,
 }
 impl SocketDescriptor {
        fn new(conn: Arc<Mutex<Connection>>) -> Self {
-               let id = conn.lock().unwrap().id;
-               Self { conn, id }
+               let (id, write_avail_sender) = {
+                       let us = conn.lock().unwrap();
+                       (us.id, Arc::new(us.write_avail.clone()))
+               };
+               Self { conn, id, write_avail_sender }
        }
 }
 impl peer_handler::SocketDescriptor for SocketDescriptor {
        fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
-               // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
-               // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
-               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
+               // To send data, we take a lock on our Connection to access the TcpStream, writing to it if
+               // there's room in the kernel buffer, or otherwise create a new Waker wrapping the
+               // write_avail Sender, which wakes up the
                // processing future which will call write_buffer_space_avail and we'll end up back here.
                let mut us = self.conn.lock().unwrap();
                if us.writer.is_none() {
@@ -402,28 +490,25 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
                        let _ = us.read_waker.try_send(());
                }
                if data.is_empty() { return 0; }
-               let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
+               let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&self.write_avail_sender)) };
                let mut ctx = task::Context::from_waker(&waker);
                let mut written_len = 0;
                loop {
-                       match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
-                               task::Poll::Ready(Ok(res)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(res, 0);
-                                       written_len += res;
-                                       if written_len == data.len() { return written_len; }
-                               },
-                               task::Poll::Ready(Err(e)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
-                                       // Probably we've already been closed, just return what we have and let the
-                                       // read thread handle closing logic.
-                                       return written_len;
+                       match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) {
+                               task::Poll::Ready(Ok(())) => {
+                                       match us.writer.as_ref().unwrap().try_write(&data[written_len..]) {
+                                               Ok(res) => {
+                                                       debug_assert_ne!(res, 0);
+                                                       written_len += res;
+                                                       if written_len == data.len() { return written_len; }
+                                               },
+                                               Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+                                                       continue;
+                                               }
+                                               Err(_) => return written_len,
+                                       }
                                },
+                               task::Poll::Ready(Err(_)) => return written_len,
                                task::Poll::Pending => {
                                        // We're queued up for a write event now, but we need to make sure we also
                                        // pause read given we're now waiting on the remote end to ACK (and in
@@ -450,6 +535,7 @@ impl Clone for SocketDescriptor {
                Self {
                        conn: Arc::clone(&self.conn),
                        id: self.id,
+                       write_avail_sender: Arc::clone(&self.write_avail_sender),
                }
        }
 }
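
`send_data` drives the write side with the same readiness-then-try shape: `poll_write_ready` registers the hand-rolled waker, and a `WouldBlock` from `try_write` just means the readiness was spurious, so the loop polls again. For comparison, a sketch of the same loop in ordinary async form, without the manual waker (the helper name is illustrative):

    use std::sync::Arc;
    use tokio::net::TcpStream;

    /// Writes as much of `data` as possible, returning the bytes written
    /// before hitting an error (mirroring `send_data`'s best-effort return).
    async fn write_best_effort(stream: &Arc<TcpStream>, data: &[u8]) -> usize {
        let mut written = 0;
        while written < data.len() {
            if stream.writable().await.is_err() { return written; }
            match stream.try_write(&data[written..]) {
                Ok(len) => written += len,
                // Spurious readiness: wait again.
                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
                Err(_) => return written,
            }
        }
        written
    }
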
@@ -470,7 +556,6 @@ mod tests {
        use lightning::ln::features::*;
        use lightning::ln::msgs::*;
        use lightning::ln::peer_handler::{MessageHandler, PeerManager};
-       use lightning::ln::features::NodeFeatures;
        use lightning::routing::gossip::NodeId;
        use lightning::events::*;
        use lightning::util::test_utils::TestNodeSigner;
@@ -487,7 +572,7 @@ mod tests {
 
        pub struct TestLogger();
        impl lightning::util::logger::Logger for TestLogger {
-               fn log(&self, record: &lightning::util::logger::Record) {
+               fn log(&self, record: lightning::util::logger::Record) {
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
        }
@@ -533,6 +618,10 @@ mod tests {
                fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
                fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &OpenChannelV2) {}
                fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &AcceptChannelV2) {}
+               fn handle_stfu(&self, _their_node_id: &PublicKey, _msg: &Stfu) {}
+               fn handle_splice(&self, _their_node_id: &PublicKey, _msg: &Splice) {}
+               fn handle_splice_ack(&self, _their_node_id: &PublicKey, _msg: &SpliceAck) {}
+               fn handle_splice_locked(&self, _their_node_id: &PublicKey, _msg: &SpliceLocked) {}
                fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &TxAddInput) {}
                fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &TxAddOutput) {}
                fn handle_tx_remove_input(&self, _their_node_id: &PublicKey, _msg: &TxRemoveInput) {}
@@ -558,7 +647,7 @@ mod tests {
                fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
                fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
                fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
-               fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+               fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
                        Some(vec![ChainHash::using_genesis_block(Network::Testnet)])
                }
        }