Correct `lightning-net-tokio` documentation, remove stale example
[rust-lightning] / lightning-net-tokio / src / lib.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! A socket handling library for those running in Tokio environments who wish to use
11 //! rust-lightning with native [`TcpStream`]s.
12 //!
13 //! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
14 //! [`TcpStream`] and a reference to a [`PeerManager`] and the rest is handled".
15 //!
16 //! The [`PeerManager`], due to the fire-and-forget nature of this logic, must be a reference
17 //! (e.g. an [`Arc`]) and must use the [`SocketDescriptor`] provided here as the [`PeerManager`]'s
18 //! `SocketDescriptor` implementation.
19 //!
20 //! Three functions are exposed to register a new connection for handling in [`tokio::spawn`] calls;
21 //! see their individual docs for details.
22 //!
23 //! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
24
25 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
26 #![deny(broken_intra_doc_links)]
27 #![deny(private_intra_doc_links)]
28
29 #![deny(missing_docs)]
30 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
31
32 use bitcoin::secp256k1::PublicKey;
33
34 use tokio::net::TcpStream;
35 use tokio::{io, time};
36 use tokio::sync::mpsc;
37 use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
38
39 use lightning::chain::keysinterface::NodeSigner;
40 use lightning::ln::peer_handler;
41 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
42 use lightning::ln::peer_handler::CustomMessageHandler;
43 use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
44 use lightning::util::logger::Logger;
45
46 use std::ops::Deref;
47 use std::task;
48 use std::net::SocketAddr;
49 use std::net::TcpStream as StdTcpStream;
50 use std::sync::{Arc, Mutex};
51 use std::sync::atomic::{AtomicU64, Ordering};
52 use std::time::Duration;
53 use std::hash::Hash;
54
55 static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
56
57 /// Connection contains all our internal state for a connection - we hold a reference to the
58 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
59 /// read future (which is returned by schedule_read).
60 struct Connection {
61         writer: Option<io::WriteHalf<TcpStream>>,
62         // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
63         // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
64         // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
65         // This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
66         // the schedule_read stack.
67         //
68         // An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
69         // runtime with functions templated by the Arc<PeerManager> type, calling
70         // write_buffer_space_avail directly from tokio's write wake; however, doing so would require
71         // more unsafe voodoo than I really feel like writing.
72         write_avail: mpsc::Sender<()>,
73         // When we are told by rust-lightning to pause read (because we have writes backing up), we do
74         // so by setting read_paused. At that point, the read task will stop reading bytes from the
75         // socket. To wake it up (without otherwise changing its state), we can push a value into this
76         // Sender.
77         read_waker: mpsc::Sender<()>,
78         read_paused: bool,
79         rl_requested_disconnect: bool,
80         id: u64,
81 }
82 impl Connection {
83         async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH, NS>(
84                 peer_manager: PM,
85                 mut event_receiver: mpsc::Receiver<()>,
86         ) where
87                         PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync,
88                         CMH: Deref + 'static + Send + Sync,
89                         RMH: Deref + 'static + Send + Sync,
90                         OMH: Deref + 'static + Send + Sync,
91                         L: Deref + 'static + Send + Sync,
92                         UMH: Deref + 'static + Send + Sync,
93                         NS: Deref + 'static + Send + Sync,
94                         CMH::Target: ChannelMessageHandler + Send + Sync,
95                         RMH::Target: RoutingMessageHandler + Send + Sync,
96                         OMH::Target: OnionMessageHandler + Send + Sync,
97                         L::Target: Logger + Send + Sync,
98                         UMH::Target: CustomMessageHandler + Send + Sync,
99                         NS::Target: NodeSigner + Send + Sync,
100         {
101                 loop {
102                         if event_receiver.recv().await.is_none() {
103                                 return;
104                         }
105                         peer_manager.process_events();
106                 }
107         }
108
109         async fn schedule_read<PM, CMH, RMH, OMH, L, UMH, NS>(
110                 peer_manager: PM,
111                 us: Arc<Mutex<Self>>,
112                 mut reader: io::ReadHalf<TcpStream>,
113                 mut read_wake_receiver: mpsc::Receiver<()>,
114                 mut write_avail_receiver: mpsc::Receiver<()>,
115         ) where
116                         PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
117                         CMH: Deref + 'static + Send + Sync,
118                         RMH: Deref + 'static + Send + Sync,
119                         OMH: Deref + 'static + Send + Sync,
120                         L: Deref + 'static + Send + Sync,
121                         UMH: Deref + 'static + Send + Sync,
122                         NS: Deref + 'static + Send + Sync,
123                         CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
124                         RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
125                         OMH::Target: OnionMessageHandler + 'static + Send + Sync,
126                         L::Target: Logger + 'static + Send + Sync,
127                         UMH::Target: CustomMessageHandler + 'static + Send + Sync,
128                         NS::Target: NodeSigner + 'static + Send + Sync,
129                 {
130                 // Create a waker to wake up poll_event_process, above
131                 let (event_waker, event_receiver) = mpsc::channel(1);
132                 tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
133
134                 // 4KiB is nice and big without handling too many messages all at once, giving other peers
135                 // a chance to do some work.
136                 let mut buf = [0; 4096];
137
138                 let mut our_descriptor = SocketDescriptor::new(us.clone());
139                 // An enum describing why we did/are disconnecting:
140                 enum Disconnect {
141                         // Rust-Lightning told us to disconnect, either by returning an Err or by calling
142                         // SocketDescriptor::disconnect_socket.
143                         // In this case, we do not call peer_manager.socket_disconnected() as Rust-Lightning
144                         // already knows we're disconnected.
145                         CloseConnection,
146                         // The connection was disconnected for some other reason, i.e. because the socket was
147                         // closed.
148                         // In this case, we do need to call peer_manager.socket_disconnected() to inform
149                         // Rust-Lightning that the socket is gone.
150                         PeerDisconnected
151                 }
152                 let disconnect_type = loop {
153                         let read_paused = {
154                                 let us_lock = us.lock().unwrap();
155                                 if us_lock.rl_requested_disconnect {
156                                         break Disconnect::CloseConnection;
157                                 }
158                                 us_lock.read_paused
159                         };
160                         tokio::select! {
161                                 v = write_avail_receiver.recv() => {
162                                 assert!(v.is_some()); // We can't have dropped the sending end, it's in the `us` Arc!
163                                         if peer_manager.write_buffer_space_avail(&mut our_descriptor).is_err() {
164                                                 break Disconnect::CloseConnection;
165                                         }
166                                 },
167                                 _ = read_wake_receiver.recv() => {},
168                                 read = reader.read(&mut buf), if !read_paused => match read {
169                                         Ok(0) => break Disconnect::PeerDisconnected,
170                                         Ok(len) => {
171                                                 let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
172                                                 let mut us_lock = us.lock().unwrap();
173                                                 match read_res {
174                                                         Ok(pause_read) => {
175                                                                 if pause_read {
176                                                                         us_lock.read_paused = true;
177                                                                 }
178                                                         },
179                                                         Err(_) => break Disconnect::CloseConnection,
180                                                 }
181                                         },
182                                         Err(_) => break Disconnect::PeerDisconnected,
183                                 },
184                         }
185                         let _ = event_waker.try_send(());
186
187                         // At this point we've processed a message or two, and reset the ping timer for this
188                         // peer, at least in the "are we still receiving messages" context. If we don't give up
189                         // our timeslice to another task, we may just spin on this peer, starving other peers
190                         // and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
191                         // here.
192                         tokio::task::yield_now().await;
193                 };
194                 let writer_option = us.lock().unwrap().writer.take();
195                 if let Some(mut writer) = writer_option {
196                         // If the socket is already closed, shutdown() will fail, so just ignore it.
197                         let _ = writer.shutdown().await;
198                 }
199                 if let Disconnect::PeerDisconnected = disconnect_type {
200                         peer_manager.socket_disconnected(&our_descriptor);
201                         peer_manager.process_events();
202                 }
203         }
204
205         fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
206                 // We only ever need a channel of depth 1 here: if we returned a non-full write to the
207                 // PeerManager, we will eventually get notified that there is room in the socket to write
208                 // new bytes, which will generate an event. That event will be popped off the queue before
209                 // we call write_buffer_space_avail, ensuring that we have room to push a new () if, during
210                 // the write_buffer_space_avail() call, send_data() returns a non-full write.
211                 let (write_avail, write_receiver) = mpsc::channel(1);
212                 // Similarly here - our only goal is to make sure the reader wakes up at some point after
213                 // we shove a value into the channel which comes after we've reset the read_paused bool to
214                 // false.
215                 let (read_waker, read_receiver) = mpsc::channel(1);
216                 stream.set_nonblocking(true).unwrap();
217                 let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
218
219                 (reader, write_receiver, read_receiver,
220                 Arc::new(Mutex::new(Self {
221                         writer: Some(writer), write_avail, read_waker, read_paused: false,
222                         rl_requested_disconnect: false,
223                         id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
224                 })))
225         }
226 }
227
228 fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
229         match stream.peer_addr() {
230                 Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
231                         addr: sockaddr.ip().octets(),
232                         port: sockaddr.port(),
233                 }),
234                 Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
235                         addr: sockaddr.ip().octets(),
236                         port: sockaddr.port(),
237                 }),
238                 Err(_) => None,
239         }
240 }
241
242 /// Process incoming messages and feed outgoing messages on the provided socket generated by
243 /// accepting an incoming connection.
244 ///
245 /// The returned future will complete when the peer is disconnected and associated handling
246 /// futures are freed. However, because all processing futures are spawned with tokio::spawn, you do
247 /// not need to poll the provided future in order to make progress.
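///
/// A minimal usage sketch (illustrative only and not compiled as a doctest): it assumes a
/// `peer_manager` satisfying the bounds below has already been constructed and wrapped in an
/// `Arc`, and the listen address is a placeholder.
///
/// ```ignore
/// let listener = tokio::net::TcpListener::bind("0.0.0.0:9735").await?;
/// loop {
///     let (tcp_stream, _addr) = listener.accept().await?;
///     let peer_manager = Arc::clone(&peer_manager);
///     tokio::spawn(async move {
///         lightning_net_tokio::setup_inbound(peer_manager, tcp_stream.into_std().unwrap()).await;
///     });
/// }
/// ```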
248 pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
249         peer_manager: PM,
250         stream: StdTcpStream,
251 ) -> impl std::future::Future<Output=()> where
252                 PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
253                 CMH: Deref + 'static + Send + Sync,
254                 RMH: Deref + 'static + Send + Sync,
255                 OMH: Deref + 'static + Send + Sync,
256                 L: Deref + 'static + Send + Sync,
257                 UMH: Deref + 'static + Send + Sync,
258                 NS: Deref + 'static + Send + Sync,
259                 CMH::Target: ChannelMessageHandler + Send + Sync,
260                 RMH::Target: RoutingMessageHandler + Send + Sync,
261                 OMH::Target: OnionMessageHandler + Send + Sync,
262                 L::Target: Logger + Send + Sync,
263                 UMH::Target: CustomMessageHandler + Send + Sync,
264                 NS::Target: NodeSigner + Send + Sync,
265 {
266         let remote_addr = get_addr_from_stream(&stream);
267         let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
268         #[cfg(test)]
269         let last_us = Arc::clone(&us);
270
271         let handle_opt = if peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
272                 Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
273         } else {
274                 // Note that we will skip socket_disconnected here, in accordance with the PeerManager
275                 // requirements.
276                 None
277         };
278
279         async move {
280                 if let Some(handle) = handle_opt {
281                         if let Err(e) = handle.await {
282                                 assert!(e.is_cancelled());
283                         } else {
284                                 // This is certainly not guaranteed to always be true - the read loop may exit
285                                 // while there are still pending write wakers that need to be woken up after the
286                                 // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
287                                 // keep too many wakers around, this makes sense. The race should be rare (we do
288                                 // some work after shutdown()) and an error would be a major memory leak.
289                                 #[cfg(test)]
290                                 debug_assert!(Arc::try_unwrap(last_us).is_ok());
291                         }
292                 }
293         }
294 }
295
296 /// Process incoming messages and feed outgoing messages on the provided socket generated by
297 /// making an outbound connection which is expected to be accepted by a peer with the given
298 /// public key. The relevant processing is set to run free (via tokio::spawn).
299 ///
300 /// The returned future will complete when the peer is disconnected and associated handling
301 /// futures are freed. However, because all processing futures are spawned with tokio::spawn, you do
302 /// not need to poll the provided future in order to make progress.
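///
/// A minimal usage sketch (illustrative only; `peer_manager` and `their_node_id` are assumed to
/// exist already, and the address is a placeholder). See also [`connect_outbound`], which opens
/// the TCP connection for you:
///
/// ```ignore
/// let stream = std::net::TcpStream::connect("198.51.100.1:9735")?;
/// tokio::spawn(lightning_net_tokio::setup_outbound(
///     Arc::clone(&peer_manager), their_node_id, stream,
/// ));
/// ```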
303 pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
304         peer_manager: PM,
305         their_node_id: PublicKey,
306         stream: StdTcpStream,
307 ) -> impl std::future::Future<Output=()> where
308                 PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
309                 CMH: Deref + 'static + Send + Sync,
310                 RMH: Deref + 'static + Send + Sync,
311                 OMH: Deref + 'static + Send + Sync,
312                 L: Deref + 'static + Send + Sync,
313                 UMH: Deref + 'static + Send + Sync,
314                 NS: Deref + 'static + Send + Sync,
315                 CMH::Target: ChannelMessageHandler + Send + Sync,
316                 RMH::Target: RoutingMessageHandler + Send + Sync,
317                 OMH::Target: OnionMessageHandler + Send + Sync,
318                 L::Target: Logger + Send + Sync,
319                 UMH::Target: CustomMessageHandler + Send + Sync,
320                 NS::Target: NodeSigner + Send + Sync,
321 {
322         let remote_addr = get_addr_from_stream(&stream);
323         let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
324         #[cfg(test)]
325         let last_us = Arc::clone(&us);
326         let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
327                 Some(tokio::spawn(async move {
328                         // We should essentially always have enough room in a TCP socket buffer to send the
329                         // initial 10s of bytes. However, tokio running in single-threaded mode will always
330                         // fail writes and wake us back up later to write. Thus, we handle a single
331                         // std::task::Poll::Pending but still expect to write the full set of bytes at once
332                         // and use a relatively tight timeout.
333                         if let Ok(Ok(())) = tokio::time::timeout(Duration::from_millis(100), async {
334                                 loop {
335                                         match SocketDescriptor::new(us.clone()).send_data(&initial_send, true) {
336                                                 v if v == initial_send.len() => break Ok(()),
337                                                 0 => {
338                                                         write_receiver.recv().await;
339                                                         // In theory we could check whether we've been instructed to disconnect
340                                                         // the peer here, but it's OK to just skip it - we'll check for it in
341                                                         // schedule_read prior to any relevant calls into RL.
342                                                 },
343                                                 _ => {
344                                                         eprintln!("Failed to write first full message to socket!");
345                                                         peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
346                                                         break Err(());
347                                                 }
348                                         }
349                                 }
350                         }).await {
351                                 Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver).await;
352                         }
353                 }))
354         } else {
355                 // Note that we will skip socket_disconnected here, in accordance with the PeerManager
356                 // requirements.
357                 None
358         };
359
360         async move {
361                 if let Some(handle) = handle_opt {
362                         if let Err(e) = handle.await {
363                                 assert!(e.is_cancelled());
364                         } else {
365                                 // This is certainly not guaranteed to always be true - the read loop may exit
366                                 // while there are still pending write wakers that need to be woken up after the
367                                 // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
368                                 // keep too many wakers around, this makes sense. The race should be rare (we do
369                                 // some work after shutdown()) and an error would be a major memory leak.
370                                 #[cfg(test)]
371                                 debug_assert!(Arc::try_unwrap(last_us).is_ok());
372                         }
373                 }
374         }
375 }
376
377 /// Process incoming messages and feed outgoing messages on a new connection made to the given
378 /// socket address which is expected to be accepted by a peer with the given public key (by
379 /// scheduling futures with tokio::spawn).
380 ///
381 /// Shorthand for TcpStream::connect(addr) with a timeout followed by setup_outbound().
382 ///
383 /// Returns a future (as the fn is async) which needs to be polled to complete the connection and
384 /// connection setup. That future then returns a future which will complete when the peer is
385 /// disconnected and associated handling futures are freed. However, because all processing in said
386 /// futures is spawned with tokio::spawn, you do not need to poll the second future in order to
387 /// make progress.
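///
/// A minimal usage sketch (illustrative only; `peer_manager` and `their_node_id` are assumed to
/// exist already, and the address is a placeholder):
///
/// ```ignore
/// let addr: SocketAddr = "198.51.100.1:9735".parse().unwrap();
/// if let Some(disconnection_fut) = lightning_net_tokio::connect_outbound(
///     Arc::clone(&peer_manager), their_node_id, addr,
/// ).await {
///     // Optional: await or spawn this future to learn when the peer disconnects; message
///     // processing happens in separately-spawned tasks either way.
///     tokio::spawn(disconnection_fut);
/// } else {
///     // The TCP connection failed or timed out (after 10 seconds).
/// }
/// ```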
388 pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
389         peer_manager: PM,
390         their_node_id: PublicKey,
391         addr: SocketAddr,
392 ) -> Option<impl std::future::Future<Output=()>> where
393                 PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
394                 CMH: Deref + 'static + Send + Sync,
395                 RMH: Deref + 'static + Send + Sync,
396                 OMH: Deref + 'static + Send + Sync,
397                 L: Deref + 'static + Send + Sync,
398                 UMH: Deref + 'static + Send + Sync,
399                 NS: Deref + 'static + Send + Sync,
400                 CMH::Target: ChannelMessageHandler + Send + Sync,
401                 RMH::Target: RoutingMessageHandler + Send + Sync,
402                 OMH::Target: OnionMessageHandler + Send + Sync,
403                 L::Target: Logger + Send + Sync,
404                 UMH::Target: CustomMessageHandler + Send + Sync,
405                 NS::Target: NodeSigner + Send + Sync,
406 {
407         if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
408                 Some(setup_outbound(peer_manager, their_node_id, stream))
409         } else { None }
410 }
411
412 const SOCK_WAKER_VTABLE: task::RawWakerVTable =
413         task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);
414
415 fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
416         write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
417 }
418 // When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
419 // failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
420 // sending thread may have already gone away due to a socket close, in which case there's nothing
421 // to wake up anyway.
422 fn wake_socket_waker(orig_ptr: *const ()) {
423         let sender = unsafe { &mut *(orig_ptr as *mut mpsc::Sender<()>) };
424         let _ = sender.try_send(());
425         drop_socket_waker(orig_ptr);
426 }
427 fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
428         let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
429         let sender = unsafe { (*sender_ptr).clone() };
430         let _ = sender.try_send(());
431 }
432 fn drop_socket_waker(orig_ptr: *const ()) {
433         let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
434         // _orig_box is now dropped
435 }
436 fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
437         let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
438         let new_ptr = new_box as *const mpsc::Sender<()>;
439         task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
440 }
441
442 /// The SocketDescriptor used to refer to sockets by a PeerManager. This is pub only as it is a
443 /// type in the template of PeerManager.
444 pub struct SocketDescriptor {
445         conn: Arc<Mutex<Connection>>,
446         id: u64,
447 }
448 impl SocketDescriptor {
449         fn new(conn: Arc<Mutex<Connection>>) -> Self {
450                 let id = conn.lock().unwrap().id;
451                 Self { conn, id }
452         }
453 }
454 impl peer_handler::SocketDescriptor for SocketDescriptor {
455         fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
456                 // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
457                 // writing to it if there's room in the kernel buffer, or otherwise creating a new Waker
458                 // (holding a clone of the write_avail Sender) which can wake up the processing future; that
459                 // future will call write_buffer_space_avail and we'll end up back here.
460                 let mut us = self.conn.lock().unwrap();
461                 if us.writer.is_none() {
462                         // The writer gets take()n when it is time to shut down, so just fast-return 0 here.
463                         return 0;
464                 }
465
466                 if resume_read && us.read_paused {
467                         // The schedule_read future may go to lock up but end up getting woken up by there
468                         // being more room in the write buffer, dropping the other end of this Sender
469                         // before we get here, so we ignore any failures to wake it up.
470                         us.read_paused = false;
471                         let _ = us.read_waker.try_send(());
472                 }
473                 if data.is_empty() { return 0; }
474                 let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
475                 let mut ctx = task::Context::from_waker(&waker);
476                 let mut written_len = 0;
477                 loop {
478                         match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
479                                 task::Poll::Ready(Ok(res)) => {
480                                         // The tokio docs *seem* to indicate this can't happen, and I certainly don't
481                                         // know how to handle it if it does (because it should be a Poll::Pending
482                                         // instead):
483                                         assert_ne!(res, 0);
484                                         written_len += res;
485                                         if written_len == data.len() { return written_len; }
486                                 },
487                                 task::Poll::Ready(Err(e)) => {
488                                         // The tokio docs *seem* to indicate this can't happen, and I certainly don't
489                                         // know how to handle it if it does (because it should be a Poll::Pending
490                                         // instead):
491                                         assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
492                                         // Probably we've already been closed, just return what we have and let the
493                                         // read thread handle closing logic.
494                                         return written_len;
495                                 },
496                                 task::Poll::Pending => {
497                                         // We're queued up for a write event now, but we need to make sure we also
498                                         // pause read given we're now waiting on the remote end to ACK (and in
499                                         // accordance with the send_data() docs).
500                                         us.read_paused = true;
501                                         // Further, to avoid any current pending read causing a `read_event` call, wake
502                                         // up the read_waker and restart its loop.
503                                         let _ = us.read_waker.try_send(());
504                                         return written_len;
505                                 },
506                         }
507                 }
508         }
509
510         fn disconnect_socket(&mut self) {
511                 let mut us = self.conn.lock().unwrap();
512                 us.rl_requested_disconnect = true;
513                 // Wake up the sending thread, assuming it is still alive
514                 let _ = us.write_avail.try_send(());
515         }
516 }
517 impl Clone for SocketDescriptor {
518         fn clone(&self) -> Self {
519                 Self {
520                         conn: Arc::clone(&self.conn),
521                         id: self.id,
522                 }
523         }
524 }
525 impl Eq for SocketDescriptor {}
526 impl PartialEq for SocketDescriptor {
527         fn eq(&self, o: &Self) -> bool {
528                 self.id == o.id
529         }
530 }
531 impl Hash for SocketDescriptor {
532         fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
533                 self.id.hash(state);
534         }
535 }
536
537 #[cfg(test)]
538 mod tests {
539         use lightning::ln::features::*;
540         use lightning::ln::msgs::*;
541         use lightning::ln::peer_handler::{MessageHandler, PeerManager};
542         use lightning::ln::features::NodeFeatures;
543         use lightning::routing::gossip::NodeId;
544         use lightning::events::*;
545         use lightning::util::test_utils::TestNodeSigner;
546         use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
547
548         use tokio::sync::mpsc;
549
550         use std::mem;
551         use std::sync::atomic::{AtomicBool, Ordering};
552         use std::sync::{Arc, Mutex};
553         use std::time::Duration;
554
555         pub struct TestLogger();
556         impl lightning::util::logger::Logger for TestLogger {
557                 fn log(&self, record: &lightning::util::logger::Record) {
558                         println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
559                 }
560         }
561
562         struct MsgHandler{
563                 expected_pubkey: PublicKey,
564                 pubkey_connected: mpsc::Sender<()>,
565                 pubkey_disconnected: mpsc::Sender<()>,
566                 disconnected_flag: AtomicBool,
567                 msg_events: Mutex<Vec<MessageSendEvent>>,
568         }
569         impl RoutingMessageHandler for MsgHandler {
570                 fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
571                 fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
572                 fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
573                 fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
574                 fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
575                 fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
576                 fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
577                 fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
578                 fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
579                 fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
580                 fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
581                 fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
582                 fn processing_queue_high(&self) -> bool { false }
583         }
584         impl ChannelMessageHandler for MsgHandler {
585                 fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &OpenChannel) {}
586                 fn handle_accept_channel(&self, _their_node_id: &PublicKey, _msg: &AcceptChannel) {}
587                 fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
588                 fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
589                 fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {}
590                 fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {}
591                 fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
592                 fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
593                 fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
594                 fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailHTLC) {}
595                 fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailMalformedHTLC) {}
596                 fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &CommitmentSigned) {}
597                 fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {}
598                 fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
599                 fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
600                 fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
601                 fn peer_disconnected(&self, their_node_id: &PublicKey) {
602                         if *their_node_id == self.expected_pubkey {
603                                 self.disconnected_flag.store(true, Ordering::SeqCst);
604                                 self.pubkey_disconnected.clone().try_send(()).unwrap();
605                         }
606                 }
607                 fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> {
608                         if *their_node_id == self.expected_pubkey {
609                                 self.pubkey_connected.clone().try_send(()).unwrap();
610                         }
611                         Ok(())
612                 }
613                 fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
614                 fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
615                 fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
616                 fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
617         }
618         impl MessageSendEventsProvider for MsgHandler {
619                 fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
620                         let mut ret = Vec::new();
621                         mem::swap(&mut *self.msg_events.lock().unwrap(), &mut ret);
622                         ret
623                 }
624         }
625
626         fn make_tcp_connection() -> (std::net::TcpStream, std::net::TcpStream) {
627                 if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
628                         (std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
629                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:19735") {
630                         (std::net::TcpStream::connect("127.0.0.1:19735").unwrap(), listener.accept().unwrap().0)
631                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9997") {
632                         (std::net::TcpStream::connect("127.0.0.1:9997").unwrap(), listener.accept().unwrap().0)
633                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9998") {
634                         (std::net::TcpStream::connect("127.0.0.1:9998").unwrap(), listener.accept().unwrap().0)
635                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
636                         (std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
637                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
638                         (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
639                 } else { panic!("Failed to bind to v4 localhost on common ports"); }
640         }
641
642         async fn do_basic_connection_test() {
643                 let secp_ctx = Secp256k1::new();
644                 let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
645                 let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
646                 let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key);
647                 let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
648
649                 let (a_connected_sender, mut a_connected) = mpsc::channel(1);
650                 let (a_disconnected_sender, mut a_disconnected) = mpsc::channel(1);
651                 let a_handler = Arc::new(MsgHandler {
652                         expected_pubkey: b_pub,
653                         pubkey_connected: a_connected_sender,
654                         pubkey_disconnected: a_disconnected_sender,
655                         disconnected_flag: AtomicBool::new(false),
656                         msg_events: Mutex::new(Vec::new()),
657                 });
658                 let a_manager = Arc::new(PeerManager::new(MessageHandler {
659                         chan_handler: Arc::clone(&a_handler),
660                         route_handler: Arc::clone(&a_handler),
661                         onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
662                 }, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
663
664                 let (b_connected_sender, mut b_connected) = mpsc::channel(1);
665                 let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
666                 let b_handler = Arc::new(MsgHandler {
667                         expected_pubkey: a_pub,
668                         pubkey_connected: b_connected_sender,
669                         pubkey_disconnected: b_disconnected_sender,
670                         disconnected_flag: AtomicBool::new(false),
671                         msg_events: Mutex::new(Vec::new()),
672                 });
673                 let b_manager = Arc::new(PeerManager::new(MessageHandler {
674                         chan_handler: Arc::clone(&b_handler),
675                         route_handler: Arc::clone(&b_handler),
676                         onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
677                 }, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(b_key))));
678
679                 // We bind on localhost, hoping the environment is properly configured with a local
680                 // address. This may not always be the case in containers and the like, so if this test is
681                 // failing for you check that you have a loopback interface and it is configured with
682                 // 127.0.0.1.
683                 let (conn_a, conn_b) = make_tcp_connection();
684
685                 let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
686                 let fut_b = super::setup_inbound(b_manager, conn_b);
687
688                 tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap();
689                 tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();
690
691                 a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
692                         node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None }
693                 });
694                 assert!(!a_handler.disconnected_flag.load(Ordering::SeqCst));
695                 assert!(!b_handler.disconnected_flag.load(Ordering::SeqCst));
696
697                 a_manager.process_events();
698                 tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap();
699                 tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap();
700                 assert!(a_handler.disconnected_flag.load(Ordering::SeqCst));
701                 assert!(b_handler.disconnected_flag.load(Ordering::SeqCst));
702
703                 fut_a.await;
704                 fut_b.await;
705         }
706
707         #[tokio::test(flavor = "multi_thread")]
708         async fn basic_threaded_connection_test() {
709                 do_basic_connection_test().await;
710         }
711
712         #[tokio::test]
713         async fn basic_unthreaded_connection_test() {
714                 do_basic_connection_test().await;
715         }
716
717         async fn race_disconnect_accept() {
718                 // Previously, if we handed an already-disconnected socket to `setup_inbound` we'd panic.
719                 // This attempts to find other similar races by opening connections and shutting them down
720                 // while connecting. Sadly in testing this did *not* reproduce the previous issue.
721                 let secp_ctx = Secp256k1::new();
722                 let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
723                 let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
724                 let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
725
726                 let a_manager = Arc::new(PeerManager::new(MessageHandler {
727                         chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
728                         onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
729                         route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
730                 }, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
731
732                 // Make two connections, one for an inbound and one for an outbound connection
733                 let conn_a = {
734                         let (conn_a, _) = make_tcp_connection();
735                         conn_a
736                 };
737                 let conn_b = {
738                         let (_, conn_b) = make_tcp_connection();
739                         conn_b
740                 };
741
742                 // Call connection setup inside new tokio tasks.
743                 let manager_reference = Arc::clone(&a_manager);
744                 tokio::spawn(async move {
745                         super::setup_inbound(manager_reference, conn_a).await
746                 });
747                 tokio::spawn(async move {
748                         super::setup_outbound(a_manager, b_pub, conn_b).await
749                 });
750         }
751
752         #[tokio::test(flavor = "multi_thread")]
753         async fn threaded_race_disconnect_accept() {
754                 race_disconnect_accept().await;
755         }
756
757         #[tokio::test]
758         async fn unthreaded_race_disconnect_accept() {
759                 race_disconnect_accept().await;
760         }
761 }