//!
//! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use bitcoin::secp256k1::PublicKey;
use tokio::net::TcpStream;
-use tokio::{io, time};
+use tokio::time;
use tokio::sync::mpsc;
-use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::peer_handler::APeerManager;
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
use std::ops::Deref;
use std::task::{self, Poll};
// define a trivial two- and three- select macro with the specific types we need and just use that.
pub(crate) enum SelectorOutput {
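+ // `A` and `B` carry the output of an mpsc `recv()`, while `C` carries the readiness result
+ // of `TcpStream::readable()` - a `Result<()>` now, rather than a byte count, since the
+ // actual read happens separately via `try_read` once the socket is readable.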
- A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
+ A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
}
pub(crate) struct TwoSelector<
}
pub(crate) struct ThreeSelector<
- A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+ A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
> {
pub a: A,
pub b: B,
}
impl<
- A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+ A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
> Future for ThreeSelector<A, B, C> {
type Output = SelectorOutput;
fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
/// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
/// read future (which is returned by schedule_read).
struct Connection {
- writer: Option<io::WriteHalf<TcpStream>>,
+ writer: Option<Arc<TcpStream>>,
// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
peer_manager: PM,
us: Arc<Mutex<Self>>,
- mut reader: io::ReadHalf<TcpStream>,
+ reader: Arc<TcpStream>,
mut read_wake_receiver: mpsc::Receiver<()>,
mut write_avail_receiver: mpsc::Receiver<()>,
) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
ThreeSelector {
a: Box::pin(write_avail_receiver.recv()),
b: Box::pin(read_wake_receiver.recv()),
- c: Box::pin(reader.read(&mut buf)),
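+ // Only wait for read-readiness here; the actual non-blocking read happens via `try_read`
+ // below, which needs just `&self` on the shared `Arc<TcpStream>` rather than a `&mut`
+ // read half.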
+ c: Box::pin(reader.readable()),
}.await
};
match select_result {
break Disconnect::CloseConnection;
}
},
- SelectorOutput::B(_) => {},
- SelectorOutput::C(read) => {
- match read {
+ SelectorOutput::B(some) => {
+ // The mpsc Receiver should only return `None` if the write side has been
+ // dropped, but that shouldn't be possible since it's referenced by the Self in
+ // `us`.
+ debug_assert!(some.is_some());
+ },
+ SelectorOutput::C(res) => {
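+ // `readable()` returning an error means the socket itself has failed, so treat it
+ // as the peer having disconnected.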
+ if res.is_err() { break Disconnect::PeerDisconnected; }
+ match reader.try_read(&mut buf) {
Ok(0) => break Disconnect::PeerDisconnected,
Ok(len) => {
let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
Err(_) => break Disconnect::CloseConnection,
}
},
+ Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+ // readable() is allowed to spuriously wake, so we have to handle
+ // WouldBlock here.
+ },
Err(_) => break Disconnect::PeerDisconnected,
}
},
// here.
let _ = tokio::task::yield_now().await;
};
- let writer_option = us.lock().unwrap().writer.take();
- if let Some(mut writer) = writer_option {
- // If the socket is already closed, shutdown() will fail, so just ignore it.
- let _ = writer.shutdown().await;
- }
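+ // Dropping the `Arc<TcpStream>` in `writer` releases our write-side reference; the socket
+ // itself is closed once all clones of the Arc (including `reader`) have been dropped, so
+ // an explicit `shutdown()` is no longer required.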
+ us.lock().unwrap().writer.take();
if let Disconnect::PeerDisconnected = disconnect_type {
peer_manager.as_ref().socket_disconnected(&our_descriptor);
peer_manager.as_ref().process_events();
}
}
- fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+ fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
// We only ever need a channel of depth 1 here: if we returned a non-full write to the
// PeerManager, we will eventually get notified that there is room in the socket to write
// new bytes, which will generate an event. That event will be popped off the queue before
// false.
let (read_waker, read_receiver) = mpsc::channel(1);
stream.set_nonblocking(true).unwrap();
- let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
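+ // Rather than `tokio::io::split`ting the stream into halves, share a single `TcpStream`
+ // via `Arc`: tokio's readiness-based APIs (`readable()`, `try_read`, `try_write`, and
+ // `poll_write_ready`) all take `&self`, so no exclusive borrow is needed.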
+ let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap());
- (reader, write_receiver, read_receiver,
+ (Arc::clone(&tokio_stream), write_receiver, read_receiver,
Arc::new(Mutex::new(Self {
- writer: Some(writer), write_avail, read_waker, read_paused: false,
+ writer: Some(tokio_stream), write_avail, read_waker, read_paused: false,
rl_requested_disconnect: false,
id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
})))
}
}
-fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
match stream.peer_addr() {
- Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+ Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
addr: sockaddr.ip().octets(),
port: sockaddr.port(),
}),
- Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+ Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
addr: sockaddr.ip().octets(),
port: sockaddr.port(),
}),
task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);
fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
- write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
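+ // Reconstruct the Arc from the raw pointer just long enough to clone it (bumping the
+ // strong count for the new waker), then leak it again below so the original waker's
+ // reference survives.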
+ let new_waker = unsafe { Arc::from_raw(orig_ptr as *const mpsc::Sender<()>) };
+ let res = write_avail_to_waker(&new_waker);
+ // Avoid decrementing the refcount when new_waker drops by turning it back into a raw
+ // pointer via `into_raw`.
+ let _ = Arc::into_raw(new_waker);
+ res
}
// When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
// failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
- let sender = unsafe { (*sender_ptr).clone() };
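+ // `wake_by_ref` must leave the waker's reference intact, so just borrow the sender in
+ // place rather than reconstructing (and then having to re-leak) the Arc.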
+ let sender = unsafe { &*sender_ptr };
let _ = sender.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
- let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
- // _orig_box is now dropped
+ let _orig_arc = unsafe { Arc::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
+ // _orig_arc is now dropped
}
-fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
- let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
- let new_ptr = new_box as *const mpsc::Sender<()>;
+fn write_avail_to_waker(sender: &Arc<mpsc::Sender<()>>) -> task::RawWaker {
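+ // `Arc::into_raw` leaks one strong reference, which is eventually reclaimed by
+ // `drop_socket_waker`.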
+ let new_ptr = Arc::into_raw(Arc::clone(&sender));
task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}
/// type in the template of PeerHandler.
pub struct SocketDescriptor {
conn: Arc<Mutex<Connection>>,
+ // We store a copy of the mpsc::Sender which wakes the read task in an Arc here. While we
+ // could simply clone the sender and store a copy in each waker, that would require an
+ // allocation for each waker. Instead, we `Arc::clone` it, creating a new reference, and
+ // store the pointer in the waker.
+ write_avail_sender: Arc<mpsc::Sender<()>>,
id: u64,
}
impl SocketDescriptor {
fn new(conn: Arc<Mutex<Connection>>) -> Self {
- let id = conn.lock().unwrap().id;
- Self { conn, id }
+ let (id, write_avail_sender) = {
+ let us = conn.lock().unwrap();
+ (us.id, Arc::new(us.write_avail.clone()))
+ };
+ Self { conn, id, write_avail_sender }
}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
- // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
- // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
- // a SocketDescriptor in it which can wake up the write_avail Sender, waking up the
+ // To send data, we take a lock on our Connection to access the TcpStream, writing to it if
+ // there's room in the kernel buffer. Otherwise, we create a new Waker wrapping our
+ // write_avail Sender which, when woken, wakes the processing future; that future will call
+ // write_buffer_space_avail and we'll end up back here.
let mut us = self.conn.lock().unwrap();
if us.writer.is_none() {
let _ = us.read_waker.try_send(());
}
if data.is_empty() { return 0; }
- let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
+ let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&self.write_avail_sender)) };
let mut ctx = task::Context::from_waker(&waker);
let mut written_len = 0;
loop {
- match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
- task::Poll::Ready(Ok(res)) => {
- // The tokio docs *seem* to indicate this can't happen, and I certainly don't
- // know how to handle it if it does (cause it should be a Poll::Pending
- // instead):
- assert_ne!(res, 0);
- written_len += res;
- if written_len == data.len() { return written_len; }
- },
- task::Poll::Ready(Err(e)) => {
- // The tokio docs *seem* to indicate this can't happen, and I certainly don't
- // know how to handle it if it does (cause it should be a Poll::Pending
- // instead):
- assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
- // Probably we've already been closed, just return what we have and let the
- // read thread handle closing logic.
- return written_len;
+ match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) {
+ task::Poll::Ready(Ok(())) => {
+ match us.writer.as_ref().unwrap().try_write(&data[written_len..]) {
+ Ok(res) => {
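+ // We never pass an empty buffer here (empty sends return early above), so a
+ // successful write should always cover at least one byte.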
+ debug_assert_ne!(res, 0);
+ written_len += res;
+ if written_len == data.len() { return written_len; }
+ },
+ Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
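+ // Like `readable()`, write-readiness can be reported spuriously, so loop
+ // back around and `poll_write_ready` again.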
+ continue;
+ }
+ Err(_) => return written_len,
+ }
},
+ task::Poll::Ready(Err(_)) => return written_len,
task::Poll::Pending => {
// We're queued up for a write event now, but we need to make sure we also
// pause read given we're now waiting on the remote end to ACK (and in
Self {
conn: Arc::clone(&self.conn),
id: self.id,
+ write_avail_sender: Arc::clone(&self.write_avail_sender),
}
}
}
use lightning::ln::features::*;
use lightning::ln::msgs::*;
use lightning::ln::peer_handler::{MessageHandler, PeerManager};
- use lightning::ln::features::NodeFeatures;
use lightning::routing::gossip::NodeId;
use lightning::events::*;
use lightning::util::test_utils::TestNodeSigner;
pub struct TestLogger();
impl lightning::util::logger::Logger for TestLogger {
- fn log(&self, record: &lightning::util::logger::Record) {
+ fn log(&self, record: lightning::util::logger::Record) {
println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
}
}
fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &OpenChannelV2) {}
fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &AcceptChannelV2) {}
+ fn handle_stfu(&self, _their_node_id: &PublicKey, _msg: &Stfu) {}
+ #[cfg(splicing)]
+ fn handle_splice(&self, _their_node_id: &PublicKey, _msg: &Splice) {}
+ #[cfg(splicing)]
+ fn handle_splice_ack(&self, _their_node_id: &PublicKey, _msg: &SpliceAck) {}
+ #[cfg(splicing)]
+ fn handle_splice_locked(&self, _their_node_id: &PublicKey, _msg: &SpliceLocked) {}
fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &TxAddInput) {}
fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &TxAddOutput) {}
fn handle_tx_remove_input(&self, _their_node_id: &PublicKey, _msg: &TxRemoveInput) {}
fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
- fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+ fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
Some(vec![ChainHash::using_genesis_block(Network::Testnet)])
}
}