Remove the `peer_disconnected` `no_connection_possible` flag
[rust-lightning] / lightning-net-tokio / src / lib.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! A socket handling library for those running in Tokio environments who wish to use
11 //! rust-lightning with native TcpStreams.
12 //!
13 //! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
14 //! TcpStream and a reference to a PeerManager and the rest is handled", except for the
15 //! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see example below.
16 //!
17 //! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use
18 //! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor.
19 //!
20 //! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see
21 //! their individual docs for details.
22 //!
23 //! # Example
24 //! ```
25 //! use std::net::TcpStream;
26 //! use bitcoin::secp256k1::PublicKey;
27 //! use lightning::util::events::{Event, EventHandler, EventsProvider};
28 //! use std::net::SocketAddr;
29 //! use std::sync::Arc;
30 //!
31 //! // Define concrete types for our high-level objects:
32 //! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
33 //! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
34 //! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
35 //! type NodeSigner = dyn lightning::chain::keysinterface::NodeSigner + Send + Sync;
36 //! type UtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
37 //! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
38 //! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
39 //! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
40 //! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
41 //! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, UtxoLookup, Logger>>;
42 //!
43 //! // Connect to node with pubkey their_node_id at addr:
44 //! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
45 //!     lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
46 //!     loop {
47 //!             let event_handler = |event: Event| {
48 //!                     // Handle the event!
49 //!             };
50 //!             channel_manager.await_persistable_update();
51 //!             channel_manager.process_pending_events(&event_handler);
52 //!             chain_monitor.process_pending_events(&event_handler);
53 //!     }
54 //! }
55 //!
56 //! // Begin reading from a newly accepted socket and talk to the peer:
57 //! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
58 //!     lightning_net_tokio::setup_inbound(peer_manager, socket);
59 //!     loop {
60 //!             let event_handler = |event: Event| {
61 //!                     // Handle the event!
62 //!             };
63 //!             channel_manager.await_persistable_update();
64 //!             channel_manager.process_pending_events(&event_handler);
65 //!             chain_monitor.process_pending_events(&event_handler);
66 //!     }
67 //! }
68 //! ```
69
70 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
71 #![deny(broken_intra_doc_links)]
72 #![deny(private_intra_doc_links)]
73
74 #![deny(missing_docs)]
75 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
76
77 use bitcoin::secp256k1::PublicKey;
78
79 use tokio::net::TcpStream;
80 use tokio::{io, time};
81 use tokio::sync::mpsc;
82 use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
83
84 use lightning::chain::keysinterface::NodeSigner;
85 use lightning::ln::peer_handler;
86 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
87 use lightning::ln::peer_handler::CustomMessageHandler;
88 use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
89 use lightning::util::logger::Logger;
90
91 use std::ops::Deref;
92 use std::task;
93 use std::net::SocketAddr;
94 use std::net::TcpStream as StdTcpStream;
95 use std::sync::{Arc, Mutex};
96 use std::sync::atomic::{AtomicU64, Ordering};
97 use std::time::Duration;
98 use std::hash::Hash;
99
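// Monotonically increasing counter used to assign each Connection a unique id; SocketDescriptor's
// PartialEq/Hash implementations below compare descriptors by this id alone.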
100 static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
101
102 /// Connection contains all our internal state for a connection - we hold a reference to the
103 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
104 /// read future (which is returned by schedule_read).
105 struct Connection {
106         writer: Option<io::WriteHalf<TcpStream>>,
107         // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
108         // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
109         // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
110         // This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
111         // the schedule_read stack.
112         //
113         // An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
114         // runtime with functions templated by the Arc<PeerManager> type, calling
115         // write_buffer_space_avail directly from tokio's write wake; however, doing so would require
116         // more unsafe voodoo than I really feel like writing.
117         write_avail: mpsc::Sender<()>,
118         // When we are told by rust-lightning to pause read (because we have writes backing up), we do
119         // so by setting read_paused. At that point, the read task will stop reading bytes from the
120         // socket. To wake it up (without otherwise changing its state), we can push a value into this
121         // Sender.
122         read_waker: mpsc::Sender<()>,
123         read_paused: bool,
124         rl_requested_disconnect: bool,
125         id: u64,
126 }
127 impl Connection {
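        // Spawned once per connection from schedule_read: waits on the event_receiver channel and
        // calls PeerManager::process_events each time the read loop signals that newly-read messages
        // may have generated events. Exits when the sender held by schedule_read is dropped.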
128         async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH, NS>(
129                 peer_manager: PM,
130                 mut event_receiver: mpsc::Receiver<()>,
131         ) where
132                         PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync,
133                         CMH: Deref + 'static + Send + Sync,
134                         RMH: Deref + 'static + Send + Sync,
135                         OMH: Deref + 'static + Send + Sync,
136                         L: Deref + 'static + Send + Sync,
137                         UMH: Deref + 'static + Send + Sync,
138                         NS: Deref + 'static + Send + Sync,
139                         CMH::Target: ChannelMessageHandler + Send + Sync,
140                         RMH::Target: RoutingMessageHandler + Send + Sync,
141                         OMH::Target: OnionMessageHandler + Send + Sync,
142                         L::Target: Logger + Send + Sync,
143                         UMH::Target: CustomMessageHandler + Send + Sync,
144                         NS::Target: NodeSigner + Send + Sync,
145         {
146                 loop {
147                         if event_receiver.recv().await.is_none() {
148                                 return;
149                         }
150                         peer_manager.process_events();
151                 }
152         }
153
154         async fn schedule_read<PM, CMH, RMH, OMH, L, UMH, NS>(
155                 peer_manager: PM,
156                 us: Arc<Mutex<Self>>,
157                 mut reader: io::ReadHalf<TcpStream>,
158                 mut read_wake_receiver: mpsc::Receiver<()>,
159                 mut write_avail_receiver: mpsc::Receiver<()>,
160         ) where
161                         PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
162                         CMH: Deref + 'static + Send + Sync,
163                         RMH: Deref + 'static + Send + Sync,
164                         OMH: Deref + 'static + Send + Sync,
165                         L: Deref + 'static + Send + Sync,
166                         UMH: Deref + 'static + Send + Sync,
167                         NS: Deref + 'static + Send + Sync,
168                         CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
169                         RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
170                         OMH::Target: OnionMessageHandler + 'static + Send + Sync,
171                         L::Target: Logger + 'static + Send + Sync,
172                         UMH::Target: CustomMessageHandler + 'static + Send + Sync,
173                         NS::Target: NodeSigner + 'static + Send + Sync,
174                 {
175                 // Create a waker to wake up poll_event_process, above
176                 let (event_waker, event_receiver) = mpsc::channel(1);
177                 tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
178
179                 // 4KiB is nice and big without handling too many messages all at once, giving other peers
180                 // a chance to do some work.
181                 let mut buf = [0; 4096];
182
183                 let mut our_descriptor = SocketDescriptor::new(us.clone());
184                 // An enum describing why we did/are disconnecting:
185                 enum Disconnect {
186                         // Rust-Lightning told us to disconnect, either by returning an Err or by calling
187                         // SocketDescriptor::disconnect_socket.
188                         // In this case, we do not call peer_manager.socket_disconnected() as Rust-Lightning
189                         // already knows we're disconnected.
190                         CloseConnection,
191                         // The connection was disconnected for some other reason, i.e. because the socket was
192                         // closed.
193                         // In this case, we do need to call peer_manager.socket_disconnected() to inform
194                         // Rust-Lightning that the socket is gone.
195                         PeerDisconnected
196                 }
197                 let disconnect_type = loop {
198                         let read_paused = {
199                                 let us_lock = us.lock().unwrap();
200                                 if us_lock.rl_requested_disconnect {
201                                         break Disconnect::CloseConnection;
202                                 }
203                                 us_lock.read_paused
204                         };
205                         tokio::select! {
206                                 v = write_avail_receiver.recv() => {
207                                         assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
208                                         if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
209                                                 break Disconnect::CloseConnection;
210                                         }
211                                 },
212                                 _ = read_wake_receiver.recv() => {},
213                                 read = reader.read(&mut buf), if !read_paused => match read {
214                                         Ok(0) => break Disconnect::PeerDisconnected,
215                                         Ok(len) => {
216                                                 let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
217                                                 let mut us_lock = us.lock().unwrap();
218                                                 match read_res {
219                                                         Ok(pause_read) => {
220                                                                 if pause_read {
221                                                                         us_lock.read_paused = true;
222                                                                 }
223                                                         },
224                                                         Err(_) => break Disconnect::CloseConnection,
225                                                 }
226                                         },
227                                         Err(_) => break Disconnect::PeerDisconnected,
228                                 },
229                         }
230                         let _ = event_waker.try_send(());
231
232                         // At this point we've processed a message or two, and reset the ping timer for this
233                         // peer, at least in the "are we still receiving messages" context. If we don't give up
234                         // our timeslice to another task we may just spin on this peer, starving other peers
235                         // and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
236                         // here.
237                         tokio::task::yield_now().await;
238                 };
239                 let writer_option = us.lock().unwrap().writer.take();
240                 if let Some(mut writer) = writer_option {
241                         // If the socket is already closed, shutdown() will fail, so just ignore it.
242                         let _ = writer.shutdown().await;
243                 }
244                 if let Disconnect::PeerDisconnected = disconnect_type {
245                         peer_manager.socket_disconnected(&our_descriptor);
246                         peer_manager.process_events();
247                 }
248         }
249
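        // Wrap the given std TcpStream in a tokio TcpStream, split it into read and write halves,
        // and build the shared Connection state along with the two wakeup channels that
        // schedule_read will consume.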
250         fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
251                 // We only ever need a channel of depth 1 here: if we returned a non-full write to the
252                 // PeerManager, we will eventually get notified that there is room in the socket to write
253                 // new bytes, which will generate an event. That event will be popped off the queue before
254                 // we call write_buffer_space_avail, ensuring that we have room to push a new () if, during
255                 // the write_buffer_space_avail() call, send_data() returns a non-full write.
256                 let (write_avail, write_receiver) = mpsc::channel(1);
257                 // Similarly here - our only goal is to make sure the reader wakes up at some point after
258                 // we shove a value into the channel which comes after we've reset the read_paused bool to
259                 // false.
260                 let (read_waker, read_receiver) = mpsc::channel(1);
261                 stream.set_nonblocking(true).unwrap();
262                 let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
263
264                 (reader, write_receiver, read_receiver,
265                 Arc::new(Mutex::new(Self {
266                         writer: Some(writer), write_avail, read_waker, read_paused: false,
267                         rl_requested_disconnect: false,
268                         id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
269                 })))
270         }
271 }
272
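// Map the peer's socket address, if one is available, to the NetAddress type rust-lightning
// expects so it can be handed to new_inbound_connection/new_outbound_connection.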
273 fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
274         match stream.peer_addr() {
275                 Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
276                         addr: sockaddr.ip().octets(),
277                         port: sockaddr.port(),
278                 }),
279                 Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
280                         addr: sockaddr.ip().octets(),
281                         port: sockaddr.port(),
282                 }),
283                 Err(_) => None,
284         }
285 }
286
287 /// Process incoming messages and feed outgoing messages on the provided socket generated by
288 /// accepting an incoming connection.
289 ///
290 /// The returned future will complete when the peer is disconnected and associated handling
291 /// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
292 /// do not need to poll the provided future in order to make progress.
293 pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
294         peer_manager: PM,
295         stream: StdTcpStream,
296 ) -> impl std::future::Future<Output=()> where
297                 PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
298                 CMH: Deref + 'static + Send + Sync,
299                 RMH: Deref + 'static + Send + Sync,
300                 OMH: Deref + 'static + Send + Sync,
301                 L: Deref + 'static + Send + Sync,
302                 UMH: Deref + 'static + Send + Sync,
303                 NS: Deref + 'static + Send + Sync,
304                 CMH::Target: ChannelMessageHandler + Send + Sync,
305                 RMH::Target: RoutingMessageHandler + Send + Sync,
306                 OMH::Target: OnionMessageHandler + Send + Sync,
307                 L::Target: Logger + Send + Sync,
308                 UMH::Target: CustomMessageHandler + Send + Sync,
309                 NS::Target: NodeSigner + Send + Sync,
310 {
311         let remote_addr = get_addr_from_stream(&stream);
312         let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
313         #[cfg(test)]
314         let last_us = Arc::clone(&us);
315
316         let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
317                 Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
318         } else {
319                 // Note that we will skip socket_disconnected here, in accordance with the PeerManager
320                 // requirements.
321                 None
322         };
323
324         async move {
325                 if let Some(handle) = handle_opt {
326                         if let Err(e) = handle.await {
327                                 assert!(e.is_cancelled());
328                         } else {
329                                 // This is certainly not guaranteed to always be true - the read loop may exit
330                                 // while there are still pending write wakers that need to be woken up after the
331                                 // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
332                                 // keep too many wakers around, this makes sense. The race should be rare (we do
333                                 // some work after shutdown()) and an error would be a major memory leak.
334                                 #[cfg(test)]
335                                 debug_assert!(Arc::try_unwrap(last_us).is_ok());
336                         }
337                 }
338         }
339 }
340
341 /// Process incoming messages and feed outgoing messages on the provided socket generated by
342 /// making an outbound connection which is expected to be accepted by a peer with the given
343 /// public key. The relevant processing is set to run free (via tokio::spawn).
344 ///
345 /// The returned future will complete when the peer is disconnected and associated handling
346 /// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
347 /// do not need to poll the provided future in order to make progress.
348 pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
349         peer_manager: PM,
350         their_node_id: PublicKey,
351         stream: StdTcpStream,
352 ) -> impl std::future::Future<Output=()> where
353                 PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
354                 CMH: Deref + 'static + Send + Sync,
355                 RMH: Deref + 'static + Send + Sync,
356                 OMH: Deref + 'static + Send + Sync,
357                 L: Deref + 'static + Send + Sync,
358                 UMH: Deref + 'static + Send + Sync,
359                 NS: Deref + 'static + Send + Sync,
360                 CMH::Target: ChannelMessageHandler + Send + Sync,
361                 RMH::Target: RoutingMessageHandler + Send + Sync,
362                 OMH::Target: OnionMessageHandler + Send + Sync,
363                 L::Target: Logger + Send + Sync,
364                 UMH::Target: CustomMessageHandler + Send + Sync,
365                 NS::Target: NodeSigner + Send + Sync,
366 {
367         let remote_addr = get_addr_from_stream(&stream);
368         let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
369         #[cfg(test)]
370         let last_us = Arc::clone(&us);
371         let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
372                 Some(tokio::spawn(async move {
373                         // We should essentially always have enough room in a TCP socket buffer to send the
374                         // initial tens of bytes. However, tokio running in single-threaded mode will always
375                         // fail writes and wake us back up later to write. Thus, we handle a single
376                         // std::task::Poll::Pending but still expect to write the full set of bytes at once
377                         // and use a relatively tight timeout.
378                         if let Ok(Ok(())) = tokio::time::timeout(Duration::from_millis(100), async {
379                                 loop {
380                                         match SocketDescriptor::new(us.clone()).send_data(&initial_send, true) {
381                                                 v if v == initial_send.len() => break Ok(()),
382                                                 0 => {
383                                                         write_receiver.recv().await;
384                                                         // In theory we could check whether we've been instructed to disconnect
385                                                         // the peer here, but it's OK to just skip it - we'll check for it in
386                                                         // schedule_read prior to any relevant calls into RL.
387                                                 },
388                                                 _ => {
389                                                         eprintln!("Failed to write first full message to socket!");
390                                                         peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
391                                                         break Err(());
392                                                 }
393                                         }
394                                 }
395                         }).await {
396                                 Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver).await;
397                         }
398                 }))
399         } else {
400                 // Note that we will skip socket_disconnected here, in accordance with the PeerManager
401                 // requirements.
402                 None
403         };
404
405         async move {
406                 if let Some(handle) = handle_opt {
407                         if let Err(e) = handle.await {
408                                 assert!(e.is_cancelled());
409                         } else {
410                                 // This is certainly not guaranteed to always be true - the read loop may exit
411                                 // while there are still pending write wakers that need to be woken up after the
412                                 // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
413                                 // keep too many wakers around, this makes sense. The race should be rare (we do
414                                 // some work after shutdown()) and an error would be a major memory leak.
415                                 #[cfg(test)]
416                                 debug_assert!(Arc::try_unwrap(last_us).is_ok());
417                         }
418                 }
419         }
420 }
421
422 /// Process incoming messages and feed outgoing messages on a new connection made to the given
423 /// socket address which is expected to be accepted by a peer with the given public key (by
424 /// scheduling futures with tokio::spawn).
425 ///
426 /// Shorthand for TcpStream::connect(addr) with a timeout followed by setup_outbound().
427 ///
428 /// Returns a future (as the fn is async) which needs to be polled to complete the connection and
429 /// connection setup. That future then returns a future which will complete when the peer is
430 /// disconnected and associated handling futures are freed. However, because all processing in said
431 /// futures is spawned with tokio::spawn, you do not need to poll the second future in order to
432 /// make progress.
433 pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
434         peer_manager: PM,
435         their_node_id: PublicKey,
436         addr: SocketAddr,
437 ) -> Option<impl std::future::Future<Output=()>> where
438                 PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
439                 CMH: Deref + 'static + Send + Sync,
440                 RMH: Deref + 'static + Send + Sync,
441                 OMH: Deref + 'static + Send + Sync,
442                 L: Deref + 'static + Send + Sync,
443                 UMH: Deref + 'static + Send + Sync,
444                 NS: Deref + 'static + Send + Sync,
445                 CMH::Target: ChannelMessageHandler + Send + Sync,
446                 RMH::Target: RoutingMessageHandler + Send + Sync,
447                 OMH::Target: OnionMessageHandler + Send + Sync,
448                 L::Target: Logger + Send + Sync,
449                 UMH::Target: CustomMessageHandler + Send + Sync,
450                 NS::Target: NodeSigner + Send + Sync,
451 {
452         if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
453                 Some(setup_outbound(peer_manager, their_node_id, stream))
454         } else { None }
455 }
456
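// A manual RawWaker implementation backed by the Connection's write_avail Sender: each waker owns
// a leaked Box<mpsc::Sender<()>> (created in write_avail_to_waker), cloning the waker clones the
// boxed Sender, dropping it frees the Box, and waking pushes a () into the channel so that
// schedule_read calls PeerManager::write_buffer_space_avail.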
457 const SOCK_WAKER_VTABLE: task::RawWakerVTable =
458         task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);
459
460 fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
461         write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
462 }
463 // When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
464 // failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
465 // sending thread may have already gone away due to a socket close, in which case there's nothing
466 // to wake up anyway.
467 fn wake_socket_waker(orig_ptr: *const ()) {
468         let sender = unsafe { &mut *(orig_ptr as *mut mpsc::Sender<()>) };
469         let _ = sender.try_send(());
470         drop_socket_waker(orig_ptr);
471 }
472 fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
473         let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
474         let sender = unsafe { (*sender_ptr).clone() };
475         let _ = sender.try_send(());
476 }
477 fn drop_socket_waker(orig_ptr: *const ()) {
478         let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
479         // _orig_box is now dropped
480 }
481 fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
482         let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
483         let new_ptr = new_box as *const mpsc::Sender<()>;
484         task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
485 }
486
487 /// The SocketDescriptor used to refer to sockets by a PeerHandler. This is pub only as it is a
488 /// type in the template of PeerHandler.
489 pub struct SocketDescriptor {
490         conn: Arc<Mutex<Connection>>,
491         id: u64,
492 }
493 impl SocketDescriptor {
494         fn new(conn: Arc<Mutex<Connection>>) -> Self {
495                 let id = conn.lock().unwrap().id;
496                 Self { conn, id }
497         }
498 }
499 impl peer_handler::SocketDescriptor for SocketDescriptor {
500         fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
501                 // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
502                 // writing to it if there's room in the kernel buffer, or otherwise create a new Waker holding
503                 // a clone of the write_avail Sender which, when triggered, wakes up the
504                 // processing future; that future will call write_buffer_space_avail and we'll end up back here.
505                 let mut us = self.conn.lock().unwrap();
506                 if us.writer.is_none() {
507                         // The writer gets take()n when it is time to shut down, so just fast-return 0 here.
508                         return 0;
509                 }
510
511                 if resume_read && us.read_paused {
512                         // The schedule_read future may be about to block but end up getting woken up by there
513                         // being more room in the write buffer, dropping the other end of this Sender
514                         // before we get here, so we ignore any failures to wake it up.
515                         us.read_paused = false;
516                         let _ = us.read_waker.try_send(());
517                 }
518                 if data.is_empty() { return 0; }
519                 let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
520                 let mut ctx = task::Context::from_waker(&waker);
521                 let mut written_len = 0;
522                 loop {
523                         match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
524                                 task::Poll::Ready(Ok(res)) => {
525                                         // The tokio docs *seem* to indicate this can't happen, and I certainly don't
526                                         // know how to handle it if it does (because it should be a Poll::Pending
527                                         // instead):
528                                         assert_ne!(res, 0);
529                                         written_len += res;
530                                         if written_len == data.len() { return written_len; }
531                                 },
532                                 task::Poll::Ready(Err(e)) => {
533                                         // The tokio docs *seem* to indicate this can't happen, and I certainly don't
534                                         // know how to handle it if it does (because it should be a Poll::Pending
535                                         // instead):
536                                         assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
537                                         // Probably we've already been closed, just return what we have and let the
538                                         // read thread handle closing logic.
539                                         return written_len;
540                                 },
541                                 task::Poll::Pending => {
542                                         // We're queued up for a write event now, but we need to make sure we also
543                                         // pause read given we're now waiting on the remote end to ACK (and in
544                                         // accordance with the send_data() docs).
545                                         us.read_paused = true;
546                                         // Further, to avoid any current pending read causing a `read_event` call, wake
547                                         // up the read_waker and restart its loop.
548                                         let _ = us.read_waker.try_send(());
549                                         return written_len;
550                                 },
551                         }
552                 }
553         }
554
555         fn disconnect_socket(&mut self) {
556                 let mut us = self.conn.lock().unwrap();
557                 us.rl_requested_disconnect = true;
558                 // Wake up the processing future (assuming it is still alive) so it notices the disconnect request
559                 let _ = us.write_avail.try_send(());
560         }
561 }
562 impl Clone for SocketDescriptor {
563         fn clone(&self) -> Self {
564                 Self {
565                         conn: Arc::clone(&self.conn),
566                         id: self.id,
567                 }
568         }
569 }
570 impl Eq for SocketDescriptor {}
571 impl PartialEq for SocketDescriptor {
572         fn eq(&self, o: &Self) -> bool {
573                 self.id == o.id
574         }
575 }
576 impl Hash for SocketDescriptor {
577         fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
578                 self.id.hash(state);
579         }
580 }
581
582 #[cfg(test)]
583 mod tests {
584         use lightning::ln::features::*;
585         use lightning::ln::msgs::*;
586         use lightning::ln::peer_handler::{MessageHandler, PeerManager};
587         use lightning::ln::features::NodeFeatures;
588         use lightning::routing::gossip::NodeId;
589         use lightning::util::events::*;
590         use lightning::util::test_utils::TestNodeSigner;
591         use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
592
593         use tokio::sync::mpsc;
594
595         use std::mem;
596         use std::sync::atomic::{AtomicBool, Ordering};
597         use std::sync::{Arc, Mutex};
598         use std::time::Duration;
599
600         pub struct TestLogger();
601         impl lightning::util::logger::Logger for TestLogger {
602                 fn log(&self, record: &lightning::util::logger::Record) {
603                         println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
604                 }
605         }
606
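        // A test message handler implementing both ChannelMessageHandler and RoutingMessageHandler
        // (plus MessageSendEventsProvider) which only signals, via the channels below, when the peer
        // with expected_pubkey connects or disconnects.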
607         struct MsgHandler{
608                 expected_pubkey: PublicKey,
609                 pubkey_connected: mpsc::Sender<()>,
610                 pubkey_disconnected: mpsc::Sender<()>,
611                 disconnected_flag: AtomicBool,
612                 msg_events: Mutex<Vec<MessageSendEvent>>,
613         }
614         impl RoutingMessageHandler for MsgHandler {
615                 fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
616                 fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
617                 fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
618                 fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
619                 fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
620                 fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
621                 fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
622                 fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
623                 fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
624                 fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
625                 fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
626                 fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
627                 fn processing_queue_high(&self) -> bool { false }
628         }
629         impl ChannelMessageHandler for MsgHandler {
630                 fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &OpenChannel) {}
631                 fn handle_accept_channel(&self, _their_node_id: &PublicKey, _msg: &AcceptChannel) {}
632                 fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
633                 fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
634                 fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {}
635                 fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {}
636                 fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
637                 fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
638                 fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
639                 fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailHTLC) {}
640                 fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailMalformedHTLC) {}
641                 fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &CommitmentSigned) {}
642                 fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {}
643                 fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
644                 fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
645                 fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
646                 fn peer_disconnected(&self, their_node_id: &PublicKey) {
647                         if *their_node_id == self.expected_pubkey {
648                                 self.disconnected_flag.store(true, Ordering::SeqCst);
649                                 self.pubkey_disconnected.clone().try_send(()).unwrap();
650                         }
651                 }
652                 fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
653                         if *their_node_id == self.expected_pubkey {
654                                 self.pubkey_connected.clone().try_send(()).unwrap();
655                         }
656                         Ok(())
657                 }
658                 fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
659                 fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
660                 fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
661                 fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
662         }
663         impl MessageSendEventsProvider for MsgHandler {
664                 fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
665                         let mut ret = Vec::new();
666                         mem::swap(&mut *self.msg_events.lock().unwrap(), &mut ret);
667                         ret
668                 }
669         }
670
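        // Try binding a listener on a handful of localhost ports until one succeeds, returning a
        // connected (client, server) pair of std TcpStreams.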
671         fn make_tcp_connection() -> (std::net::TcpStream, std::net::TcpStream) {
672                 if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
673                         (std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
674                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:19735") {
675                         (std::net::TcpStream::connect("127.0.0.1:19735").unwrap(), listener.accept().unwrap().0)
676                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9997") {
677                         (std::net::TcpStream::connect("127.0.0.1:9997").unwrap(), listener.accept().unwrap().0)
678                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9998") {
679                         (std::net::TcpStream::connect("127.0.0.1:9998").unwrap(), listener.accept().unwrap().0)
680                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
681                         (std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
682                 } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
683                         (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
684                 } else { panic!("Failed to bind to v4 localhost on common ports"); }
685         }
686
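        // Connect two PeerManagers over a real TCP socket, wait for both sides to report the peer
        // as connected, then force a disconnect via a HandleError event and wait for both sides to
        // report the disconnect.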
687         async fn do_basic_connection_test() {
688                 let secp_ctx = Secp256k1::new();
689                 let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
690                 let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
691                 let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key);
692                 let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
693
694                 let (a_connected_sender, mut a_connected) = mpsc::channel(1);
695                 let (a_disconnected_sender, mut a_disconnected) = mpsc::channel(1);
696                 let a_handler = Arc::new(MsgHandler {
697                         expected_pubkey: b_pub,
698                         pubkey_connected: a_connected_sender,
699                         pubkey_disconnected: a_disconnected_sender,
700                         disconnected_flag: AtomicBool::new(false),
701                         msg_events: Mutex::new(Vec::new()),
702                 });
703                 let a_manager = Arc::new(PeerManager::new(MessageHandler {
704                         chan_handler: Arc::clone(&a_handler),
705                         route_handler: Arc::clone(&a_handler),
706                         onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
707                 }, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
708
709                 let (b_connected_sender, mut b_connected) = mpsc::channel(1);
710                 let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
711                 let b_handler = Arc::new(MsgHandler {
712                         expected_pubkey: a_pub,
713                         pubkey_connected: b_connected_sender,
714                         pubkey_disconnected: b_disconnected_sender,
715                         disconnected_flag: AtomicBool::new(false),
716                         msg_events: Mutex::new(Vec::new()),
717                 });
718                 let b_manager = Arc::new(PeerManager::new(MessageHandler {
719                         chan_handler: Arc::clone(&b_handler),
720                         route_handler: Arc::clone(&b_handler),
721                         onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
722                 }, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(b_key))));
723
724                 // We bind on localhost, hoping the environment is properly configured with a local
725                 // address. This may not always be the case in containers and the like, so if this test is
726                 // failing for you check that you have a loopback interface and it is configured with
727                 // 127.0.0.1.
728                 let (conn_a, conn_b) = make_tcp_connection();
729
730                 let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
731                 let fut_b = super::setup_inbound(b_manager, conn_b);
732
733                 tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap();
734                 tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();
735
736                 a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
737                         node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None }
738                 });
739                 assert!(!a_handler.disconnected_flag.load(Ordering::SeqCst));
740                 assert!(!b_handler.disconnected_flag.load(Ordering::SeqCst));
741
742                 a_manager.process_events();
743                 tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap();
744                 tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap();
745                 assert!(a_handler.disconnected_flag.load(Ordering::SeqCst));
746                 assert!(b_handler.disconnected_flag.load(Ordering::SeqCst));
747
748                 fut_a.await;
749                 fut_b.await;
750         }
751
752         #[tokio::test(flavor = "multi_thread")]
753         async fn basic_threaded_connection_test() {
754                 do_basic_connection_test().await;
755         }
756
757         #[tokio::test]
758         async fn basic_unthreaded_connection_test() {
759                 do_basic_connection_test().await;
760         }
761
762         async fn race_disconnect_accept() {
763                 // Previously, if we handed an already-disconnected socket to `setup_inbound` we'd panic.
764                 // This attempts to find other similar races by opening connections and shutting them down
765                 // while connecting. Sadly in testing this did *not* reproduce the previous issue.
766                 let secp_ctx = Secp256k1::new();
767                 let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
768                 let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
769                 let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
770
771                 let a_manager = Arc::new(PeerManager::new(MessageHandler {
772                         chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
773                         onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
774                         route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
775                 }, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
776
777                 // Make two connections, one for an inbound and one for an outbound connection
778                 let conn_a = {
779                         let (conn_a, _) = make_tcp_connection();
780                         conn_a
781                 };
782                 let conn_b = {
783                         let (_, conn_b) = make_tcp_connection();
784                         conn_b
785                 };
786
787                 // Call connection setup inside new tokio tasks.
788                 let manager_reference = Arc::clone(&a_manager);
789                 tokio::spawn(async move {
790                         super::setup_inbound(manager_reference, conn_a).await
791                 });
792                 tokio::spawn(async move {
793                         super::setup_outbound(a_manager, b_pub, conn_b).await
794                 });
795         }
796
797         #[tokio::test(flavor = "multi_thread")]
798         async fn threaded_race_disconnect_accept() {
799                 race_disconnect_accept().await;
800         }
801
802         #[tokio::test]
803         async fn unthreaded_race_disconnect_accept() {
804                 race_disconnect_accept().await;
805         }
806 }