-extern crate bytes;
-extern crate tokio;
-extern crate tokio_codec;
-extern crate futures;
-extern crate lightning;
-extern crate secp256k1;
-
-use bytes::BufMut;
-
-use futures::future;
-use futures::future::Future;
-use futures::{AsyncSink, Stream, Sink};
-use futures::sync::mpsc;
-
use secp256k1::key::PublicKey;
-use tokio::timer::Delay;
use tokio::net::TcpStream;
+use tokio::{io, time};
+use tokio::sync::{mpsc, oneshot};
+use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::msgs::ChannelMessageHandler;
-use std::mem;
+use std::task;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
-use std::time::{Duration, Instant};
-use std::vec::Vec;
+use std::time::Duration;
use std::hash::Hash;
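+// Each Connection gets a unique id from this global counter; SocketDescriptor clones carry the
+// id of the Connection they point at and use it to implement PartialEq/Eq/Hash below.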
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
/// A connection to a remote peer. Can be constructed either as a remote connection using
-/// Connection::setup_outbound o
+/// Connection::setup_outbound or as an inbound connection accepted via Connection::setup_inbound.
pub struct Connection {
- writer: Option<mpsc::Sender<bytes::Bytes>>,
+ writer: Option<io::WriteHalf<TcpStream>>,
event_notify: mpsc::Sender<()>,
- pending_read: Vec<u8>,
- read_blocker: Option<futures::sync::oneshot::Sender<Result<(), ()>>>,
+ // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
+ // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
+ // between being woken up with write-ready and calling PeerManager::write_event. This provides
+ // that indirection, with a Sender which gets handed to the PeerManager Arc on the
+ // schedule_read stack.
+ //
+	// An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
+	// runtime with functions templated by the Arc<PeerManager> type, calling write_event directly
+	// from tokio's write wake; however, doing so would require more unsafe voodoo than I really
+	// feel like writing.
+ write_avail: mpsc::Sender<()>,
+ // When we are told by rust-lightning to pause read (because we have writes backing up), we do
+	// so by setting read_paused. If the read task thereafter reads some data, it will place the
+	// send half of a oneshot here and await the receive half until send_data(_, true) unblocks it.
+ read_blocker: Option<oneshot::Sender<()>>,
read_paused: bool,
- need_disconnect: bool,
+ // If we get disconnected via SocketDescriptor::disconnect_socket(), we don't call
+ // disconnect_event(), but if we get an Err return value out of PeerManager, in general, we do.
+ // We track here whether we'll need to call disconnect_event() after the socket closes.
+ need_disconnect_event: bool,
+ disconnect: bool,
id: u64,
}
impl Connection {
- fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, us: Arc<Mutex<Self>>, reader: futures::stream::SplitStream<tokio_codec::Framed<TcpStream, tokio_codec::BytesCodec>>) {
- let us_ref = us.clone();
- let us_close_ref = us.clone();
+ async fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut write_event: mpsc::Receiver<()>) {
let peer_manager_ref = peer_manager.clone();
- tokio::spawn(reader.for_each(move |b| {
- let pending_read = b.to_vec();
- {
- let mut lock = us_ref.lock().unwrap();
- assert!(lock.pending_read.is_empty());
- if lock.read_paused {
- lock.pending_read = pending_read;
- let (sender, blocker) = futures::sync::oneshot::channel();
- lock.read_blocker = Some(sender);
- return future::Either::A(blocker.then(|_| { Ok(()) }));
- }
+ let mut buf = [0; 8192];
+ loop {
+ macro_rules! shutdown_socket {
+ ($err: expr) => { {
+ println!("Disconnecting peer due to {}!", $err);
+ break;
+ } }
}
- //TODO: There's a race where we don't meet the requirements of disconnect_socket if its
- //called right here, after we release the us_ref lock in the scope above, but before we
- //call read_event!
- match peer_manager.read_event(&mut SocketDescriptor::new(us_ref.clone(), peer_manager.clone()), &pending_read) {
- Ok(pause_read) => {
- if pause_read {
- let mut lock = us_ref.lock().unwrap();
- lock.read_paused = true;
+
+ // Whenever we want to block, we have to at least select with the write_event Receiver,
+ // which is used by the SocketDescriptor to wake us up if we need to shut down the
+ // socket or if we need to generate a write_event.
+ macro_rules! select_write_ev {
+ ($v: expr) => { {
+					assert!($v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
+ if us.lock().unwrap().disconnect {
+ shutdown_socket!("disconnect_socket() call from RL");
}
- },
- Err(e) => {
- us_ref.lock().unwrap().need_disconnect = false;
- return future::Either::B(future::result(Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))));
- }
+ if let Err(e) = peer_manager.write_event(&mut SocketDescriptor::new(us.clone())) {
+ shutdown_socket!(e);
+ }
+ } }
}
- if let Err(e) = us_ref.lock().unwrap().event_notify.try_send(()) {
+ tokio::select! {
+ v = write_event.recv() => select_write_ev!(v),
+ read = reader.read(&mut buf) => match read {
+ Ok(0) => {
+ println!("Connection closed");
+ break;
+ },
+ Ok(len) => {
+ if let Some(blocker) = {
+ let mut lock = us.lock().unwrap();
+ if lock.disconnect {
+ shutdown_socket!("disconnect_socket() call from RL");
+ }
+ if lock.read_paused {
+ let (sender, blocker) = oneshot::channel();
+ lock.read_blocker = Some(sender);
+ Some(blocker)
+ } else { None }
+ } {
+ tokio::select! {
+ res = blocker => {
+ res.unwrap(); // We should never drop the sender without sending () into it!
+ if us.lock().unwrap().disconnect {
+ shutdown_socket!("disconnect_socket() call from RL");
+ }
+ },
+ v = write_event.recv() => select_write_ev!(v),
+ }
+ }
+ match peer_manager.read_event(&mut SocketDescriptor::new(Arc::clone(&us)), &buf[0..len]) {
+ Ok(pause_read) => {
+ if pause_read {
+ let mut lock = us.lock().unwrap();
+ lock.read_paused = true;
+ }
+
+						match us.lock().unwrap().event_notify.try_send(()) {
+							Ok(_) => {},
+							// Ignore full errors as we just need them to poll after this point, so if the user
+							// hasn't received the last send yet, it doesn't matter.
+							Err(mpsc::error::TrySendError::Full(_)) => {},
+							Err(mpsc::error::TrySendError::Closed(_)) => panic!("event_notify receiver was dropped"),
+						}
+ },
+ Err(e) => shutdown_socket!(e),
+ }
+ },
+ Err(e) => {
+ println!("Connection closed: {}", e);
+ break;
+ },
+ },
+ }
+ }
+ let writer_option = us.lock().unwrap().writer.take();
+ if let Some(mut writer) = writer_option {
+ writer.shutdown().await.expect("We should be able to shutdown() a socket, even if it is already disconnected");
+ }
+ if us.lock().unwrap().need_disconnect_event {
+ peer_manager_ref.disconnect_event(&SocketDescriptor::new(Arc::clone(&us)));
+			match us.lock().unwrap().event_notify.try_send(()) {
+				Ok(_) => {},
+				// Ignore full errors as we just need them to poll after this point, so if the user
+				// hasn't received the last send yet, it doesn't matter.
+				Err(mpsc::error::TrySendError::Full(_)) => {},
+				Err(mpsc::error::TrySendError::Closed(_)) => panic!("event_notify receiver was dropped"),
+			}
+		}
-				assert!(e.is_full());
-			}
-
-			future::Either::B(future::result(Ok(())))
-		}).then(move |_| {
-			if us_close_ref.lock().unwrap().need_disconnect {
-				peer_manager_ref.disconnect_event(&SocketDescriptor::new(us_close_ref, peer_manager_ref.clone()));
-				println!("Peer disconnected!");
-			} else {
-				println!("We disconnected peer!");
-			}
-			Ok(())
-		}));
}
- fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (futures::stream::SplitStream<tokio_codec::Framed<TcpStream, tokio_codec::BytesCodec>>, Arc<Mutex<Self>>) {
- let (writer, reader) = tokio_codec::Framed::new(stream, tokio_codec::BytesCodec::new()).split();
- let (send_sink, send_stream) = mpsc::channel(3);
- tokio::spawn(writer.send_all(send_stream.map_err(|_| -> std::io::Error {
- unreachable!();
- })).then(|_| {
- future::result(Ok(()))
- }));
- let us = Arc::new(Mutex::new(Self { writer: Some(send_sink), event_notify, pending_read: Vec::new(), read_blocker: None, read_paused: false, need_disconnect: true, id: ID_COUNTER.fetch_add(1, Ordering::AcqRel) }));
+ fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+ // We only ever need a channel of depth 1 here: if we returned a non-full write to the
+ // PeerManager, we will eventually get notified that there is room in the socket to write
+ // new bytes, which will generate an event. That event will be popped off the queue before
+ // we call write_event, ensuring that we have room to push a new () if, during the
+ // write_event() call, send_data() returns a non-full write.
+ let (write_avail, receiver) = mpsc::channel(1);
+ let (reader, writer) = io::split(stream);
- (reader, us)
+ (reader, receiver,
+ Arc::new(Mutex::new(Self {
+ writer: Some(writer), event_notify, write_avail,
+ read_blocker: None, read_paused: false, need_disconnect_event: true, disconnect: false,
+ id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
+ })))
}
/// Process incoming messages and feed outgoing messages on the provided socket generated by
+/// accepting an incoming connection.
///
/// You should poll the Receive end of event_notify and call get_and_clear_pending_events() on
/// ChannelManager and ChannelMonitor objects.
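+///
+/// A minimal usage sketch (the `listener`, `peer_manager` and `event_notify` values are assumed
+/// to exist in the caller's code and are illustrative only):
+///
+/// ```ignore
+/// let (stream, _addr) = listener.accept().await.unwrap();
+/// tokio::spawn(Connection::setup_inbound(peer_manager.clone(), event_notify.clone(), stream));
+/// ```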
- pub fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) {
- let (reader, us) = Self::new(event_notify, stream);
+ pub async fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) {
+ let (reader, receiver, us) = Self::new(event_notify, stream);
- if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone(), peer_manager.clone())) {
- Self::schedule_read(peer_manager, us, reader);
+ if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
+ tokio::spawn(Self::schedule_read(peer_manager, us, reader, receiver)).await;
}
}
+/// Process incoming messages and feed outgoing messages on the provided socket generated by
+/// making an outbound connection which is expected to be accepted by a peer with the given
+/// public key.
///
/// You should poll the Receive end of event_notify and call get_and_clear_pending_events() on
/// ChannelManager and ChannelMonitor objects.
- pub fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) {
- let (reader, us) = Self::new(event_notify, stream);
+ pub async fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) {
+ let (reader, receiver, us) = Self::new(event_notify, stream);
- if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone(), peer_manager.clone())) {
- if SocketDescriptor::new(us.clone(), peer_manager.clone()).send_data(&initial_send, true) == initial_send.len() {
- Self::schedule_read(peer_manager, us, reader);
+ if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
+ if SocketDescriptor::new(us.clone()).send_data(&initial_send, true) == initial_send.len() {
+ tokio::spawn(Self::schedule_read(peer_manager, us, reader, receiver)).await;
} else {
+ // Note that we will skip disconnect_event here, in accordance with the PeerManager
+ // requirements, as disconnect_event is called by the schedule_read Future.
println!("Failed to write first full message to socket!");
}
		}
	}
+/// Process incoming messages and feed outgoing messages on a new connection made to the given
+/// socket address, which is expected to be accepted by a peer with the given public key. The
+/// connection attempt times out after ten seconds.
///
/// You should poll the Receive end of event_notify and call get_and_clear_pending_events() on
/// ChannelManager and ChannelMonitor objects.
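+///
+/// A minimal usage sketch (the `peer_manager`, `event_notify` and `their_node_id` values are
+/// assumed to exist in the caller's code and are illustrative only):
+///
+/// ```ignore
+/// let addr: SocketAddr = "127.0.0.1:9735".parse().unwrap();
+/// tokio::spawn(Connection::connect_outbound(peer_manager.clone(), event_notify.clone(), their_node_id, addr));
+/// ```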
- pub fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) {
- let connect_timeout = Delay::new(Instant::now() + Duration::from_secs(10)).then(|_| {
- future::err(std::io::Error::new(std::io::ErrorKind::TimedOut, "timeout reached"))
- });
- tokio::spawn(TcpStream::connect(&addr).select(connect_timeout)
- .and_then(move |stream| {
- Connection::setup_outbound(peer_manager, event_notify, their_node_id, stream.0);
- future::ok(())
- }).or_else(|_| {
- //TODO: return errors somehow
- future::ok(())
- }));
+ pub async fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) {
+ let connect_timeout = time::delay_for(Duration::from_secs(10));
+ let connect_fut = TcpStream::connect(&addr);
+ tokio::select! {
+ _ = connect_timeout => { },
+ res = connect_fut => {
+ if let Ok(stream) = res {
+ Connection::setup_outbound(peer_manager, event_notify, their_node_id, stream).await;
+ }
+ },
+ };
}
}
-pub struct SocketDescriptor<CMH: ChannelMessageHandler + 'static> {
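+// The four functions below form a hand-rolled RawWakerVTable around a Box<SocketDescriptor>
+// clone (leaked in descriptor_to_waker, reclaimed in drop_socket_waker). When poll_write in
+// send_data returns Pending, the waker we built from this vtable gets registered; waking it
+// just pushes a () into write_avail so the schedule_read task calls PeerManager::write_event.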
+const SOCK_WAKER_VTABLE: task::RawWakerVTable =
+ task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);
+
+fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
+ descriptor_to_waker(orig_ptr as *const SocketDescriptor)
+}
+fn wake_socket_waker(orig_ptr: *const ()) {
+ wake_socket_waker_by_ref(orig_ptr);
+ drop_socket_waker(orig_ptr);
+}
+fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
+ let descriptor = orig_ptr as *const SocketDescriptor;
+ // An error should be fine. Most likely we got two send_datas in a row, both of which failed to
+ // fully write, but we only need to provide a write_event() once. Otherwise, the sending thread
+ // may have already gone away due to a socket close, in which case there's nothing to wake up
+ // anyway.
+ let _ = unsafe { (*descriptor).conn.lock() }.unwrap().write_avail.try_send(());
+}
+fn drop_socket_waker(orig_ptr: *const ()) {
+ let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut SocketDescriptor) };
+ // _orig_box is now dropped
+}
+fn descriptor_to_waker(descriptor: *const SocketDescriptor) -> task::RawWaker {
+ let new_box = Box::leak(Box::new(unsafe { (*descriptor).clone() }));
+ let new_ptr = new_box as *const SocketDescriptor;
+ task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
+}
+
+pub struct SocketDescriptor {
conn: Arc<Mutex<Connection>>,
id: u64,
- peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>,
}
-impl<CMH: ChannelMessageHandler> SocketDescriptor<CMH> {
- fn new(conn: Arc<Mutex<Connection>>, peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>) -> Self {
+impl SocketDescriptor {
+ fn new(conn: Arc<Mutex<Connection>>) -> Self {
let id = conn.lock().unwrap().id;
- Self { conn, id, peer_manager }
+ Self { conn, id }
}
}
-impl<CMH: ChannelMessageHandler> peer_handler::SocketDescriptor for SocketDescriptor<CMH> {
+impl peer_handler::SocketDescriptor for SocketDescriptor {
fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
- macro_rules! schedule_read {
- ($us_ref: expr) => {
- tokio::spawn(future::lazy(move || -> Result<(), ()> {
- let mut read_data = Vec::new();
- {
- let mut us = $us_ref.conn.lock().unwrap();
- mem::swap(&mut read_data, &mut us.pending_read);
- }
- if !read_data.is_empty() {
- let mut us_clone = $us_ref.clone();
- match $us_ref.peer_manager.read_event(&mut us_clone, &read_data) {
- Ok(pause_read) => {
- if pause_read { return Ok(()); }
- },
- Err(_) => {
- //TODO: Not actually sure how to do this
- return Ok(());
- }
- }
- }
- let mut us = $us_ref.conn.lock().unwrap();
- if let Some(sender) = us.read_blocker.take() {
- sender.send(Ok(())).unwrap();
- }
- us.read_paused = false;
- if let Err(e) = us.event_notify.try_send(()) {
- // Ignore full errors as we just need them to poll after this point, so if the user
- // hasn't received the last send yet, it doesn't matter.
- assert!(e.is_full());
- }
- Ok(())
- }));
- }
- }
-
let mut us = self.conn.lock().unwrap();
- if resume_read {
- let us_ref = self.clone();
- schedule_read!(us_ref);
- }
- if data.is_empty() { return 0; }
if us.writer.is_none() {
- us.read_paused = true;
+			// The writer gets take()n when it's time to shut down, so just fast-return 0 here.
return 0;
}
- let mut bytes = bytes::BytesMut::with_capacity(data.len());
- bytes.put(data);
- let write_res = us.writer.as_mut().unwrap().start_send(bytes.freeze());
- match write_res {
- Ok(res) => {
- match res {
- AsyncSink::Ready => {
- data.len()
- },
- AsyncSink::NotReady(_) => {
- us.read_paused = true;
- let us_ref = self.clone();
- tokio::spawn(us.writer.take().unwrap().flush().then(move |writer_res| -> Result<(), ()> {
- if let Ok(writer) = writer_res {
- {
- let mut us = us_ref.conn.lock().unwrap();
- us.writer = Some(writer);
- }
- schedule_read!(us_ref);
- } // we'll fire the disconnect event on the socket reader end
- Ok(())
- }));
- 0
- }
- }
- },
- Err(_) => {
- // We'll fire the disconnected event on the socket reader end
- 0
- },
+ if resume_read {
+ if let Some(sender) = us.read_blocker.take() {
+ sender.send(()).unwrap();
+ }
+ us.read_paused = false;
+ }
+ if data.is_empty() { return 0; }
+ let waker = unsafe { task::Waker::from_raw(descriptor_to_waker(self)) };
+ let mut ctx = task::Context::from_waker(&waker);
+ let mut written_len = 0;
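+		// Write as much as the socket will take right now, polling the writer directly with the
+		// custom waker above. If poll_write returns Pending that waker is registered, so once the
+		// socket becomes writable again write_avail gets a () and the read task will call
+		// write_event() to resume sending from where we left off.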
+ loop {
+ match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
+ task::Poll::Ready(Ok(res)) => {
+ // The tokio docs *seem* to indicate this can't happen, and I certainly don't
+ // know how to handle it if it does (cause it should be a Poll::Pending
+ // instead):
+ assert_ne!(res, 0);
+ written_len += res;
+ if written_len == data.len() { return written_len; }
+ },
+ task::Poll::Ready(Err(e)) => {
+ // The tokio docs *seem* to indicate this can't happen, and I certainly don't
+ // know how to handle it if it does (cause it should be a Poll::Pending
+ // instead):
+ assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
+ // Probably we've already been closed, just return what we have and let the
+ // read thread handle closing logic.
+ return written_len;
+ },
+ task::Poll::Pending => {
+ // We're queued up for a write event now, but we need to make sure we also
+ // pause read given we're now waiting on the remote end to ACK (and in
+ // accordance with the send_data() docs).
+ us.read_paused = true;
+ return written_len;
+ },
+ }
}
}
fn disconnect_socket(&mut self) {
let mut us = self.conn.lock().unwrap();
- us.need_disconnect = true;
+ us.need_disconnect_event = false;
+ us.disconnect = true;
us.read_paused = true;
+		// Wake up the sending thread, assuming it's still alive
+ let _ = us.write_avail.try_send(());
+ // TODO: There's a race where we don't meet the requirements of disconnect_socket if the
+ // read task is about to call a PeerManager function (eg read_event or write_event).
+ // Ideally we need to release the us lock and block until we have confirmation from the
+ // read task that it has broken out of its main loop.
}
}
-impl<CMH: ChannelMessageHandler> Clone for SocketDescriptor<CMH> {
+impl Clone for SocketDescriptor {
fn clone(&self) -> Self {
Self {
conn: Arc::clone(&self.conn),
id: self.id,
- peer_manager: Arc::clone(&self.peer_manager),
}
}
}
-impl<CMH: ChannelMessageHandler> Eq for SocketDescriptor<CMH> {}
-impl<CMH: ChannelMessageHandler> PartialEq for SocketDescriptor<CMH> {
+impl Eq for SocketDescriptor {}
+impl PartialEq for SocketDescriptor {
fn eq(&self, o: &Self) -> bool {
self.id == o.id
}
}
-impl<CMH: ChannelMessageHandler> Hash for SocketDescriptor<CMH> {
+impl Hash for SocketDescriptor {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.id.hash(state);
}