// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! A socket handling library for those running in Tokio environments who wish to use
//! rust-lightning with native TcpStreams.
//!
//! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
//! TcpStream and a reference to a PeerManager and the rest is handled", except for the
//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see example below.
//!
//! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use
//! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor.
//!
//! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see
//! their individual docs for details.
//!
//! # Example
//! ```
//! use std::net::TcpStream;
//! use bitcoin::secp256k1::key::PublicKey;
//! use lightning::util::events::{Event, EventHandler, EventsProvider};
//! use std::net::SocketAddr;
//! use std::sync::Arc;
//!
//! // Define concrete types for our high-level objects:
//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
//!
//! // Connect to node with pubkey their_node_id at addr:
//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
//! 	lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
//! 	loop {
//! 		let event_handler = |event: &Event| {
//! 			// Handle the event!
//! 		};
//! 		channel_manager.await_persistable_update();
//! 		channel_manager.process_pending_events(&event_handler);
//! 		chain_monitor.process_pending_events(&event_handler);
//! 	}
//! }
//!
//! // Begin reading from a newly accepted socket and talk to the peer:
//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
//! 	lightning_net_tokio::setup_inbound(peer_manager, socket);
//! 	loop {
//! 		let event_handler = |event: &Event| {
//! 			// Handle the event!
//! 		};
//! 		channel_manager.await_persistable_update();
//! 		channel_manager.process_pending_events(&event_handler);
//! 		chain_monitor.process_pending_events(&event_handler);
//! 	}
//! }
//! ```

#![deny(broken_intra_doc_links)]
#![deny(missing_docs)]

#![cfg_attr(docsrs, feature(doc_auto_cfg))]

use bitcoin::secp256k1::key::PublicKey;

use tokio::net::TcpStream;
use tokio::{io, time};
use tokio::sync::mpsc;
use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};

use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::peer_handler::CustomMessageHandler;
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
use lightning::util::logger::Logger;

use std::task;
use std::net::IpAddr;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use std::hash::Hash;

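// A process-global counter handing out a unique id per Connection, which backs the Eq/Hash
// implementations on SocketDescriptor below.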
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);

/// Connection contains all our internal state for a connection - we hold a reference to the
/// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
/// read future (which is returned by schedule_read).
struct Connection {
	writer: Option<io::WriteHalf<TcpStream>>,
	// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
	// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
	// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
	// This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
	// the schedule_read stack.
	//
	// An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
	// runtime with functions templated by the Arc<PeerManager> type, calling
	// write_buffer_space_avail directly from tokio's write wake, however doing so would require
	// more unsafe voodoo than I really feel like writing.
	write_avail: mpsc::Sender<()>,
	// When we are told by rust-lightning to pause read (because we have writes backing up), we do
	// so by setting read_paused. At that point, the read task will stop reading bytes from the
	// socket. To wake it up (without otherwise changing its state), we can push a value into this
	// Sender.
	read_waker: mpsc::Sender<()>,
	read_paused: bool,
	rl_requested_disconnect: bool,
	id: u64,
}
impl Connection {
	async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
			CMH: ChannelMessageHandler + 'static,
			RMH: RoutingMessageHandler + 'static,
			L: Logger + 'static + ?Sized,
			UMH: CustomMessageHandler + 'static {
		// 8KB is nice and big but also should never cause any issues with stack overflowing.
		let mut buf = [0; 8192];

		let mut our_descriptor = SocketDescriptor::new(us.clone());
		// An enum describing why we did/are disconnecting:
		enum Disconnect {
			// Rust-Lightning told us to disconnect, either by returning an Err or by calling
			// SocketDescriptor::disconnect_socket.
			// In this case, we do not call peer_manager.socket_disconnected() as Rust-Lightning
			// already knows we're disconnected.
			CloseConnection,
			// The connection was disconnected for some other reason, i.e. because the socket was
			// closed.
			// In this case, we do need to call peer_manager.socket_disconnected() to inform
			// Rust-Lightning that the socket is gone.
			PeerDisconnected
		}
		let disconnect_type = loop {
			let read_paused = {
				let us_lock = us.lock().unwrap();
				if us_lock.rl_requested_disconnect {
					break Disconnect::CloseConnection;
				}
				us_lock.read_paused
			};
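			// Wait on whichever of three things happens first: a write-ready notification (so we
			// can call write_buffer_space_avail), a nudge telling us reading was un-paused, or,
			// if reading isn't currently paused, new bytes arriving on the socket itself.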
			tokio::select! {
				v = write_avail_receiver.recv() => {
					assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
					if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
						break Disconnect::CloseConnection;
					}
				},
				_ = read_wake_receiver.recv() => {},
				read = reader.read(&mut buf), if !read_paused => match read {
					Ok(0) => break Disconnect::PeerDisconnected,
					Ok(len) => {
						let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
						let mut us_lock = us.lock().unwrap();
						match read_res {
							Ok(pause_read) => {
								if pause_read {
									us_lock.read_paused = true;
								}
							},
							Err(_) => break Disconnect::CloseConnection,
						}
					},
					Err(_) => break Disconnect::PeerDisconnected,
				},
			}
			peer_manager.process_events();
		};
		let writer_option = us.lock().unwrap().writer.take();
		if let Some(mut writer) = writer_option {
			// If the socket is already closed, shutdown() will fail, so just ignore it.
			let _ = writer.shutdown().await;
		}
		if let Disconnect::PeerDisconnected = disconnect_type {
			peer_manager.socket_disconnected(&our_descriptor);
			peer_manager.process_events();
		}
	}

	fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
		// We only ever need a channel of depth 1 here: if we returned a non-full write to the
		// PeerManager, we will eventually get notified that there is room in the socket to write
		// new bytes, which will generate an event. That event will be popped off the queue before
		// we call write_buffer_space_avail, ensuring that we have room to push a new () if, during
		// the write_buffer_space_avail() call, send_data() returns a non-full write.
		let (write_avail, write_receiver) = mpsc::channel(1);
		// Similarly here - our only goal is to make sure the reader wakes up at some point after
		// we shove a value into the channel which comes after we've reset the read_paused bool to
		// false.
		let (read_waker, read_receiver) = mpsc::channel(1);
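		// tokio's TcpStream::from_std requires that the std socket already be in non-blocking
		// mode before we hand it over.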
		stream.set_nonblocking(true).unwrap();
		let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());

		(reader, write_receiver, read_receiver,
		Arc::new(Mutex::new(Self {
			writer: Some(writer), write_avail, read_waker, read_paused: false,
			rl_requested_disconnect: false,
			id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
		})))
	}
}

/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// accepting an incoming connection.
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
/// not need to poll the provided future in order to make progress.
pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
		CMH: ChannelMessageHandler + 'static + Send + Sync,
		RMH: RoutingMessageHandler + 'static + Send + Sync,
		L: Logger + 'static + ?Sized + Send + Sync,
		UMH: CustomMessageHandler + 'static + Send + Sync {
	let ip_addr = stream.peer_addr().unwrap();
	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
	#[cfg(debug_assertions)]
	let last_us = Arc::clone(&us);

	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
		IpAddr::V4(ip) => Some(NetAddress::IPv4 {
			addr: ip.octets(),
			port: ip_addr.port(),
		}),
		IpAddr::V6(ip) => Some(NetAddress::IPv6 {
			addr: ip.octets(),
			port: ip_addr.port(),
		}),
	}) {
		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
	} else {
		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
		// requirements.
		None
	};

	async move {
		if let Some(handle) = handle_opt {
			if let Err(e) = handle.await {
				assert!(e.is_cancelled());
			} else {
				// This is certainly not guaranteed to always be true - the read loop may exit
				// while there are still pending write wakers that need to be woken up after the
				// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
				// keep too many wakers around, this makes sense. The race should be rare (we do
				// some work after shutdown()) and an error would be a major memory leak.
				#[cfg(debug_assertions)]
				assert!(Arc::try_unwrap(last_us).is_ok());
			}
		}
	}
}

/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// making an outbound connection which is expected to be accepted by a peer with the given
/// public key. The relevant processing is set to run free (via tokio::spawn).
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
/// not need to poll the provided future in order to make progress.
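///
/// # Example
///
/// A minimal sketch of handing over an already-connected socket, reusing the concrete type
/// aliases from the crate-level example (`handshake_with_peer` is just an illustrative helper
/// name):
/// ```
/// use std::net::TcpStream;
/// use std::sync::Arc;
/// use bitcoin::secp256k1::key::PublicKey;
///
/// type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
/// type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
/// type Logger = dyn lightning::util::logger::Logger + Send + Sync;
/// type ChainAccess = dyn lightning::chain::Access + Send + Sync;
/// type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
/// type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
/// type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
/// type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
///
/// // Hand a std TcpStream we already connected to the peer handler; the returned future
/// // resolves once the peer disconnects, but does not itself need polling for progress.
/// fn handshake_with_peer(peer_manager: PeerManager, their_node_id: PublicKey, socket: TcpStream) -> impl std::future::Future<Output = ()> {
/// 	lightning_net_tokio::setup_outbound(peer_manager, their_node_id, socket)
/// }
/// ```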
pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
		CMH: ChannelMessageHandler + 'static + Send + Sync,
		RMH: RoutingMessageHandler + 'static + Send + Sync,
		L: Logger + 'static + ?Sized + Send + Sync,
		UMH: CustomMessageHandler + 'static + Send + Sync {
	let ip_addr = stream.peer_addr().unwrap();
	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
	#[cfg(debug_assertions)]
	let last_us = Arc::clone(&us);
	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
		IpAddr::V4(ip) => Some(NetAddress::IPv4 {
			addr: ip.octets(),
			port: ip_addr.port(),
		}),
		IpAddr::V6(ip) => Some(NetAddress::IPv6 {
			addr: ip.octets(),
			port: ip_addr.port(),
		}),
	}) {
		Some(tokio::spawn(async move {
			// We should essentially always have enough room in a TCP socket buffer to send the
			// initial few tens of bytes. However, tokio running in single-threaded mode will
			// always fail writes and wake us back up later to write. Thus, we handle a single
			// std::task::Poll::Pending but still expect to write the full set of bytes at once
			// and use a relatively tight timeout.
			if let Ok(Ok(())) = tokio::time::timeout(Duration::from_millis(100), async {
				loop {
					match SocketDescriptor::new(us.clone()).send_data(&initial_send, true) {
						v if v == initial_send.len() => break Ok(()),
						0 => {
							write_receiver.recv().await;
							// In theory we could check whether we've been instructed to disconnect
							// the peer here, but it's OK to just skip it - we'll check for it in
							// schedule_read prior to any relevant calls into RL.
						},
						_ => {
							eprintln!("Failed to write first full message to socket!");
							peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
							break Err(());
						}
					}
				}
			}).await {
				Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver).await;
			}
		}))
	} else {
		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
		// requirements.
		None
	};

	async move {
		if let Some(handle) = handle_opt {
			if let Err(e) = handle.await {
				assert!(e.is_cancelled());
			} else {
				// This is certainly not guaranteed to always be true - the read loop may exit
				// while there are still pending write wakers that need to be woken up after the
				// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
				// keep too many wakers around, this makes sense. The race should be rare (we do
				// some work after shutdown()) and an error would be a major memory leak.
				#[cfg(debug_assertions)]
				assert!(Arc::try_unwrap(last_us).is_ok());
			}
		}
	}
}

/// Process incoming messages and feed outgoing messages on a new connection made to the given
/// socket address which is expected to be accepted by a peer with the given public key (by
/// scheduling futures with tokio::spawn).
///
/// Shorthand for TcpStream::connect(addr) with a timeout followed by setup_outbound().
///
/// Returns a future (as the fn is async) which needs to be polled to complete the connection and
/// connection setup. That future then returns a future which will complete when the peer is
/// disconnected and associated handling futures are freed, though, because all processing in said
/// futures is spawned with tokio::spawn, you do not need to poll the second future in order to
/// make progress.
pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
		CMH: ChannelMessageHandler + 'static + Send + Sync,
		RMH: RoutingMessageHandler + 'static + Send + Sync,
		L: Logger + 'static + ?Sized + Send + Sync,
		UMH: CustomMessageHandler + 'static + Send + Sync {
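	// Try the TCP connect itself with a 10-second timeout, then convert the resulting tokio
	// stream back into a std one so setup_outbound can take ownership of it.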
	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
		Some(setup_outbound(peer_manager, their_node_id, stream))
	} else { None }
}

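// The RawWakerVTable below implements the std::task::Waker contract on top of a boxed
// mpsc::Sender<()>:
//  * clone produces a new, independently-owned boxed copy of the Sender,
//  * wake consumes the waker (so it frees the Box after sending),
//  * wake_by_ref only borrows it (so it clones the Sender before sending), and
//  * drop frees the Box without waking.
// write_avail_to_waker builds the initial RawWaker by leaking a boxed clone of the Sender and
// stashing the pointer in the RawWaker's data field, which each vtable function then casts back.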
const SOCK_WAKER_VTABLE: task::RawWakerVTable =
	task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);

fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
	write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
}
// When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
// failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
// sending thread may have already gone away due to a socket close, in which case there's nothing
// to wake up anyway.
fn wake_socket_waker(orig_ptr: *const ()) {
	let sender = unsafe { &mut *(orig_ptr as *mut mpsc::Sender<()>) };
	let _ = sender.try_send(());
	drop_socket_waker(orig_ptr);
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
	let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
	let sender = unsafe { (*sender_ptr).clone() };
	let _ = sender.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
	let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
	// _orig_box is now dropped
}
fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
	let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
	let new_ptr = new_box as *const mpsc::Sender<()>;
	task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}

/// The SocketDescriptor used to refer to sockets by a PeerHandler. This is pub only as it is a
/// type in the template of PeerHandler.
pub struct SocketDescriptor {
	conn: Arc<Mutex<Connection>>,
	id: u64,
}
impl SocketDescriptor {
	fn new(conn: Arc<Mutex<Connection>>) -> Self {
		let id = conn.lock().unwrap().id;
		Self { conn, id }
	}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
		// To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
		// writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
		// a SocketDescriptor in it which can wake up the write_avail Sender, waking up the
		// processing future which will call write_buffer_space_avail and we'll end up back here.
		let mut us = self.conn.lock().unwrap();
		if us.writer.is_none() {
			// The writer gets take()n when it is time to shut down, so just fast-return 0 here.
			return 0;
		}

		if resume_read && us.read_paused {
			// The schedule_read future may have already gone away (e.g. after being woken up by
			// there being more room in the write buffer), dropping the other end of this Sender
			// before we get here, so we ignore any failures to wake it up.
			us.read_paused = false;
			let _ = us.read_waker.try_send(());
		}
		if data.is_empty() { return 0; }
		let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
		let mut ctx = task::Context::from_waker(&waker);
		let mut written_len = 0;
		loop {
			match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
				task::Poll::Ready(Ok(res)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (because it should be a Poll::Pending
					// instead):
					assert_ne!(res, 0);
					written_len += res;
					if written_len == data.len() { return written_len; }
				},
				task::Poll::Ready(Err(e)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (because it should be a Poll::Pending
					// instead):
					assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
					// Probably we've already been closed, just return what we have and let the
					// read thread handle closing logic.
					return written_len;
				},
				task::Poll::Pending => {
					// We're queued up for a write event now, but we need to make sure we also
					// pause read given we're now waiting on the remote end to ACK (and in
					// accordance with the send_data() docs).
					us.read_paused = true;
					return written_len;
				},
			}
		}
	}

	fn disconnect_socket(&mut self) {
		let mut us = self.conn.lock().unwrap();
		us.rl_requested_disconnect = true;
		// Wake up the sending thread, assuming it is still alive
		let _ = us.write_avail.try_send(());
	}
}
impl Clone for SocketDescriptor {
	fn clone(&self) -> Self {
		Self {
			conn: Arc::clone(&self.conn),
			id: self.id,
		}
	}
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
	fn eq(&self, o: &Self) -> bool {
		self.id == o.id
	}
}
impl Hash for SocketDescriptor {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		self.id.hash(state);
	}
}

#[cfg(test)]
mod tests {
	use lightning::ln::features::*;
	use lightning::ln::msgs::*;
	use lightning::ln::peer_handler::{MessageHandler, PeerManager};
	use lightning::util::events::*;
	use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};

	use tokio::sync::mpsc;

	use std::mem;
	use std::sync::atomic::{AtomicBool, Ordering};
	use std::sync::{Arc, Mutex};
	use std::time::Duration;

	pub struct TestLogger();
	impl lightning::util::logger::Logger for TestLogger {
		fn log(&self, record: &lightning::util::logger::Record) {
			println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
		}
	}

	struct MsgHandler{
		expected_pubkey: PublicKey,
		pubkey_connected: mpsc::Sender<()>,
		pubkey_disconnected: mpsc::Sender<()>,
		disconnected_flag: AtomicBool,
		msg_events: Mutex<Vec<MessageSendEvent>>,
	}
	impl RoutingMessageHandler for MsgHandler {
		fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
		fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
		fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
		fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
		fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
		fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
		fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
		fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
		fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
		fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
	}
	impl ChannelMessageHandler for MsgHandler {
		fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &OpenChannel) {}
		fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &AcceptChannel) {}
		fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
		fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
		fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {}
		fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {}
		fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
		fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
		fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
		fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailHTLC) {}
		fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailMalformedHTLC) {}
		fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &CommitmentSigned) {}
		fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {}
		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
		fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
		fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
			if *their_node_id == self.expected_pubkey {
				self.disconnected_flag.store(true, Ordering::SeqCst);
				self.pubkey_disconnected.clone().try_send(()).unwrap();
			}
		}
		fn peer_connected(&self, their_node_id: &PublicKey, _msg: &Init) {
			if *their_node_id == self.expected_pubkey {
				self.pubkey_connected.clone().try_send(()).unwrap();
			}
		}
		fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
		fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
	}
	impl MessageSendEventsProvider for MsgHandler {
		fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
			let mut ret = Vec::new();
			mem::swap(&mut *self.msg_events.lock().unwrap(), &mut ret);
			ret
		}
	}

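	// Spin up two PeerManagers, connect them to each other over a real localhost TCP socket,
	// wait until both report the peer as connected, then have node A force a disconnect via a
	// HandleError message event and check that both sides observe the disconnection.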
	async fn do_basic_connection_test() {
		let secp_ctx = Secp256k1::new();
		let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
		let b_key = SecretKey::from_slice(&[1; 32]).unwrap();
		let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key);
		let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);

		let (a_connected_sender, mut a_connected) = mpsc::channel(1);
		let (a_disconnected_sender, mut a_disconnected) = mpsc::channel(1);
		let a_handler = Arc::new(MsgHandler {
			expected_pubkey: b_pub,
			pubkey_connected: a_connected_sender,
			pubkey_disconnected: a_disconnected_sender,
			disconnected_flag: AtomicBool::new(false),
			msg_events: Mutex::new(Vec::new()),
		});
		let a_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::clone(&a_handler),
			route_handler: Arc::clone(&a_handler),
		}, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
		let b_handler = Arc::new(MsgHandler {
			expected_pubkey: a_pub,
			pubkey_connected: b_connected_sender,
			pubkey_disconnected: b_disconnected_sender,
			disconnected_flag: AtomicBool::new(false),
			msg_events: Mutex::new(Vec::new()),
		});
		let b_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::clone(&b_handler),
			route_handler: Arc::clone(&b_handler),
		}, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

		// We bind on localhost, hoping the environment is properly configured with a local
		// address. This may not always be the case in containers and the like, so if this test is
		// failing for you check that you have a loopback interface and it is configured with
		// 127.0.0.1.
		let (conn_a, conn_b) = if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
			(std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
			(std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
			(std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
		} else { panic!("Failed to bind to v4 localhost on common ports"); };

		let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
		let fut_b = super::setup_inbound(b_manager, conn_b);

		tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap();
		tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();

		a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
			node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None }
		});
		assert!(!a_handler.disconnected_flag.load(Ordering::SeqCst));
		assert!(!b_handler.disconnected_flag.load(Ordering::SeqCst));

		a_manager.process_events();
		tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap();
		tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap();
		assert!(a_handler.disconnected_flag.load(Ordering::SeqCst));
		assert!(b_handler.disconnected_flag.load(Ordering::SeqCst));

		fut_a.await;
		fut_b.await;
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn basic_threaded_connection_test() {
		do_basic_connection_test().await;
	}
	#[tokio::test]
	async fn basic_unthreaded_connection_test() {
		do_basic_connection_test().await;
	}
}