Rewrite lightning-net-tokio using async/await and tokio 0.2
lightning-net-tokio/src/lib.rs
use secp256k1::key::PublicKey;

use tokio::net::TcpStream;
use tokio::{io, time};
use tokio::sync::{mpsc, oneshot};
use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};

use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::msgs::ChannelMessageHandler;

use std::task;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use std::hash::Hash;

static ID_COUNTER: AtomicU64 = AtomicU64::new(0);

/// A connection to a remote peer. Can be constructed either as an outbound connection using
/// Connection::setup_outbound (or Connection::connect_outbound), or as an inbound connection
/// using Connection::setup_inbound.
pub struct Connection {
        writer: Option<io::WriteHalf<TcpStream>>,
        event_notify: mpsc::Sender<()>,
        // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
        // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
        // between being woken up with write-ready and calling PeerManager::write_event. This provides
        // that indirection, with a Sender which gets handed to the PeerManager Arc on the
        // schedule_read stack.
        //
        // An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
        // runtime with functions templated by the Arc<PeerManager> type, calling write_event directly
        // from tokio's write wake, however doing so would require more unsafe voodoo than I really feel
        // like writing.
        write_avail: mpsc::Sender<()>,
        // When we are told by rust-lightning to pause read (because we have writes backing up), we do
        // so by setting read_paused. If the read thread thereafter reads some data, it will place a
        // Sender here and then block on it.
        read_blocker: Option<oneshot::Sender<()>>,
        read_paused: bool,
        // If we get disconnected via SocketDescriptor::disconnect_socket(), we don't call
        // disconnect_event(), but if we get an Err return value out of PeerManager, in general, we do.
        // We track here whether we'll need to call disconnect_event() after the socket closes.
        need_disconnect_event: bool,
        disconnect: bool,
        id: u64,
}
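
// A sketch of the read-pause handshake that read_paused and read_blocker implement (a
// timeline, not code):
//   1. send_data() cannot complete a write (Poll::Pending in send_data below), so it sets
//      read_paused.
//   2. The read task later reads more bytes, sees read_paused, stores a oneshot::Sender in
//      read_blocker, and parks on the matching Receiver.
//   3. A subsequent send_data(.., resume_read: true) takes the Sender and fires it, letting
//      the read task resume feeding bytes into PeerManager::read_event.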
impl Connection {
        async fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut write_event: mpsc::Receiver<()>) {
                let peer_manager_ref = peer_manager.clone();
                let mut buf = [0; 8192];
                loop {
                        macro_rules! shutdown_socket {
                                ($err: expr) => { {
                                        println!("Disconnecting peer due to {}!", $err);
                                        break;
                                } }
                        }

                        // Whenever we want to block, we have to at least select with the write_event Receiver,
                        // which is used by the SocketDescriptor to wake us up if we need to shut down the
                        // socket or if we need to generate a write_event.
                        macro_rules! select_write_ev {
                                ($v: expr) => { {
                                        assert!($v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
                                        if us.lock().unwrap().disconnect {
                                                shutdown_socket!("disconnect_socket() call from RL");
                                        }
                                        if let Err(e) = peer_manager.write_event(&mut SocketDescriptor::new(us.clone())) {
                                                shutdown_socket!(e);
                                        }
                                } }
                        }

                        tokio::select! {
                                v = write_event.recv() => select_write_ev!(v),
                                read = reader.read(&mut buf) => match read {
                                        Ok(0) => {
                                                println!("Connection closed");
                                                break;
                                        },
                                        Ok(len) => {
                                                if let Some(blocker) = {
                                                        let mut lock = us.lock().unwrap();
                                                        if lock.disconnect {
                                                                shutdown_socket!("disconnect_socket() call from RL");
                                                        }
                                                        if lock.read_paused {
                                                                let (sender, blocker) = oneshot::channel();
                                                                lock.read_blocker = Some(sender);
                                                                Some(blocker)
                                                        } else { None }
                                                } {
                                                        tokio::select! {
                                                                res = blocker => {
                                                                        res.unwrap(); // We should never drop the sender without sending () into it!
                                                                        if us.lock().unwrap().disconnect {
                                                                                shutdown_socket!("disconnect_socket() call from RL");
                                                                        }
                                                                },
                                                                v = write_event.recv() => select_write_ev!(v),
                                                        }
                                                }
                                                match peer_manager.read_event(&mut SocketDescriptor::new(Arc::clone(&us)), &buf[0..len]) {
                                                        Ok(pause_read) => {
                                                                if pause_read {
                                                                        let mut lock = us.lock().unwrap();
                                                                        lock.read_paused = true;
                                                                }

                                                                match us.lock().unwrap().event_notify.try_send(()) {
                                                                        Ok(()) => {},
                                                                        // Ignore full errors as we just need them to poll after this point, so if the user
                                                                        // hasn't received the last send yet, it doesn't matter.
                                                                        Err(mpsc::error::TrySendError::Full(_)) => {},
                                                                        // The Receiver end of event_notify must outlive every connection, so a
                                                                        // closed channel here indicates a bug in the caller.
                                                                        Err(mpsc::error::TrySendError::Closed(_)) => panic!(),
                                                                }
                                                        },
                                                        Err(e) => shutdown_socket!(e),
                                                }
                                        },
                                        Err(e) => {
                                                println!("Connection closed: {}", e);
                                                break;
                                        },
                                },
                        }
                }
                let writer_option = us.lock().unwrap().writer.take();
                if let Some(mut writer) = writer_option {
                        writer.shutdown().await.expect("We should be able to shutdown() a socket, even if it is already disconnected");
                }
                if us.lock().unwrap().need_disconnect_event {
                        peer_manager_ref.disconnect_event(&SocketDescriptor::new(Arc::clone(&us)));
                        match us.lock().unwrap().event_notify.try_send(()) {
                                Ok(()) => {},
                                // Ignore full errors as we just need them to poll after this point, so if the user
                                // hasn't received the last send yet, it doesn't matter.
                                Err(mpsc::error::TrySendError::Full(_)) => {},
                                // The Receiver end of event_notify must outlive every connection, so a
                                // closed channel here indicates a bug in the caller.
                                Err(mpsc::error::TrySendError::Closed(_)) => panic!(),
                        }
                }
        }

        fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
                // We only ever need a channel of depth 1 here: if we returned a non-full write to the
                // PeerManager, we will eventually get notified that there is room in the socket to write
                // new bytes, which will generate an event. That event will be popped off the queue before
                // we call write_event, ensuring that we have room to push a new () if, during the
                // write_event() call, send_data() returns a non-full write.
                let (write_avail, receiver) = mpsc::channel(1);
                let (reader, writer) = io::split(stream);

                (reader, receiver,
                Arc::new(Mutex::new(Self {
                        writer: Some(writer), event_notify, write_avail,
                        read_blocker: None, read_paused: false, need_disconnect_event: true, disconnect: false,
                        id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
                })))
        }

        /// Process incoming messages and feed outgoing messages on the provided socket generated by
        /// accepting an incoming connection (by scheduling futures with tokio::spawn).
        ///
        /// You should poll the Receiver end of event_notify and call get_and_clear_pending_events() on
        /// ChannelManager and ChannelMonitor objects.
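        ///
        /// A minimal calling sketch, not part of this file (assumed caller-provided values:
        /// `peer_manager`, `event_notify`, and a bound tokio::net::TcpListener `listener`):
        ///
        /// ```text
        /// loop {
        ///         let (stream, _addr) = listener.accept().await.unwrap();
        ///         tokio::spawn(Connection::setup_inbound(peer_manager.clone(), event_notify.clone(), stream));
        /// }
        /// ```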
        pub async fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) {
                let (reader, receiver, us) = Self::new(event_notify, stream);

                if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
                        tokio::spawn(Self::schedule_read(peer_manager, us, reader, receiver)).await;
                }
        }

        /// Process incoming messages and feed outgoing messages on the provided socket generated by
        /// making an outbound connection which is expected to be accepted by a peer with the given
        /// public key (by scheduling futures with tokio::spawn).
        ///
        /// You should poll the Receiver end of event_notify and call get_and_clear_pending_events() on
        /// ChannelManager and ChannelMonitor objects.
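        ///
        /// A minimal calling sketch, not part of this file (assumed caller-provided values:
        /// `peer_manager`, `event_notify`, and the peer's `their_node_id`):
        ///
        /// ```text
        /// let stream = TcpStream::connect("127.0.0.1:9735").await.unwrap();
        /// tokio::spawn(Connection::setup_outbound(peer_manager.clone(), event_notify.clone(), their_node_id, stream));
        /// ```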
        pub async fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) {
                let (reader, receiver, us) = Self::new(event_notify, stream);

                if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
                        if SocketDescriptor::new(us.clone()).send_data(&initial_send, true) == initial_send.len() {
                                tokio::spawn(Self::schedule_read(peer_manager, us, reader, receiver)).await;
                        } else {
                                // Note that we will skip disconnect_event here, in accordance with the PeerManager
                                // requirements, as disconnect_event is called by the schedule_read Future.
                                println!("Failed to write first full message to socket!");
                        }
                }
        }

        /// Process incoming messages and feed outgoing messages on a new connection made to the given
        /// socket address which is expected to be accepted by a peer with the given public key (by
        /// scheduling futures with tokio::spawn).
        ///
        /// You should poll the Receiver end of event_notify and call get_and_clear_pending_events() on
        /// ChannelManager and ChannelMonitor objects.
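        ///
        /// A minimal calling sketch, not part of this file (assumed caller-provided values:
        /// `peer_manager`, `event_notify`, and `their_node_id`; the connection attempt is
        /// abandoned after the 10-second timeout below):
        ///
        /// ```text
        /// let addr: SocketAddr = "127.0.0.1:9735".parse().unwrap();
        /// tokio::spawn(Connection::connect_outbound(peer_manager.clone(), event_notify.clone(), their_node_id, addr));
        /// ```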
        pub async fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) {
                let connect_timeout = time::delay_for(Duration::from_secs(10));
                let connect_fut = TcpStream::connect(&addr);
                tokio::select! {
                        _ = connect_timeout => { },
                        res = connect_fut => {
                                if let Ok(stream) = res {
                                        Connection::setup_outbound(peer_manager, event_notify, their_node_id, stream).await;
                                }
                        },
                };
        }
}

const SOCK_WAKER_VTABLE: task::RawWakerVTable =
        task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);

fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
        descriptor_to_waker(orig_ptr as *const SocketDescriptor)
}
fn wake_socket_waker(orig_ptr: *const ()) {
        wake_socket_waker_by_ref(orig_ptr);
        drop_socket_waker(orig_ptr);
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
        let descriptor = orig_ptr as *const SocketDescriptor;
        // An error should be fine. Most likely we got two send_datas in a row, both of which failed to
        // fully write, but we only need to provide a write_event() once. Otherwise, the sending thread
        // may have already gone away due to a socket close, in which case there's nothing to wake up
        // anyway.
        let _ = unsafe { (*descriptor).conn.lock() }.unwrap().write_avail.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
        let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut SocketDescriptor) };
        // _orig_box is now dropped
}
fn descriptor_to_waker(descriptor: *const SocketDescriptor) -> task::RawWaker {
        let new_box = Box::leak(Box::new(unsafe { (*descriptor).clone() }));
        let new_ptr = new_box as *const SocketDescriptor;
        task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}
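
// Taken together, the four functions above hand-implement the Waker ownership contract:
// every RawWaker produced by descriptor_to_waker owns a leaked Box<SocketDescriptor>,
// clone_socket_waker allocates a fresh Box for the clone, wake_socket_waker consumes its
// Box (wake by reference, then drop), and wake_socket_waker_by_ref only signals write_avail
// without taking ownership. This is the bookkeeping an Arc-based Waker would normally do
// for us, avoided here for the reasons described on Connection::write_avail.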

pub struct SocketDescriptor {
        conn: Arc<Mutex<Connection>>,
        id: u64,
}
impl SocketDescriptor {
        fn new(conn: Arc<Mutex<Connection>>) -> Self {
                let id = conn.lock().unwrap().id;
                Self { conn, id }
        }
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
        fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
                let mut us = self.conn.lock().unwrap();
                if us.writer.is_none() {
                        // The writer gets take()n when it's time to shut down, so just fast-return 0 here.
                        return 0;
                }

                if resume_read {
                        if let Some(sender) = us.read_blocker.take() {
                                sender.send(()).unwrap();
                        }
                        us.read_paused = false;
                }
                if data.is_empty() { return 0; }
                let waker = unsafe { task::Waker::from_raw(descriptor_to_waker(self)) };
                let mut ctx = task::Context::from_waker(&waker);
                let mut written_len = 0;
                loop {
                        match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
                                task::Poll::Ready(Ok(res)) => {
                                        // The tokio docs *seem* to indicate this can't happen, and I certainly don't
                                        // know how to handle it if it does (cause it should be a Poll::Pending
                                        // instead):
                                        assert_ne!(res, 0);
                                        written_len += res;
                                        if written_len == data.len() { return written_len; }
                                },
                                task::Poll::Ready(Err(e)) => {
                                        // The tokio docs *seem* to indicate this can't happen, and I certainly don't
                                        // know how to handle it if it does (cause it should be a Poll::Pending
                                        // instead):
                                        assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
                                        // Probably we've already been closed, just return what we have and let the
                                        // read thread handle closing logic.
                                        return written_len;
                                },
                                task::Poll::Pending => {
                                        // We're queued up for a write event now, but we need to make sure we also
                                        // pause read given we're now waiting on the remote end to ACK (and in
                                        // accordance with the send_data() docs).
                                        us.read_paused = true;
                                        return written_len;
                                },
                        }
                }
        }

        fn disconnect_socket(&mut self) {
                let mut us = self.conn.lock().unwrap();
                us.need_disconnect_event = false;
                us.disconnect = true;
                us.read_paused = true;
                // Wake up the sending thread, assuming it's still alive
                let _ = us.write_avail.try_send(());
                // TODO: There's a race where we don't meet the requirements of disconnect_socket if the
                // read task is about to call a PeerManager function (eg read_event or write_event).
                // Ideally we need to release the us lock and block until we have confirmation from the
                // read task that it has broken out of its main loop.
        }
}
impl Clone for SocketDescriptor {
        fn clone(&self) -> Self {
                Self {
                        conn: Arc::clone(&self.conn),
                        id: self.id,
                }
        }
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
        fn eq(&self, o: &Self) -> bool {
                self.id == o.id
        }
}
impl Hash for SocketDescriptor {
        fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
                self.id.hash(state);
        }
}
332