// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! A socket handling library for those running in Tokio environments who wish to use
//! rust-lightning with native TcpStreams.
//!
//! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
//! TcpStream and a reference to a PeerManager and the rest is handled", except for the
//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see the example below.
//!
//! The PeerManager, due to the fire-and-forget nature of this logic, must be an Arc, and must use
//! the SocketDescriptor provided here as the PeerManager's SocketDescriptor.
//!
//! Three methods are exposed to register a new connection for handling in tokio::spawn calls:
//! setup_inbound, setup_outbound, and connect_outbound; see their individual docs for details.
//!
//! # Example
//!
//! ```
//! use std::net::TcpStream;
//! use bitcoin::secp256k1::PublicKey;
//! use lightning::util::events::{Event, EventHandler, EventsProvider};
//! use std::net::SocketAddr;
//! use std::sync::Arc;
//!
//! // Define concrete types for our high-level objects:
//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
//!
//! // Connect to node with pubkey their_node_id at addr:
//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
//!     lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
//!     loop {
//!         let event_handler = |event: Event| {
//!             // Handle the event!
//!         };
//!         channel_manager.await_persistable_update();
//!         channel_manager.process_pending_events(&event_handler);
//!         chain_monitor.process_pending_events(&event_handler);
//!     }
//! }
//!
//! // Begin reading from a newly accepted socket and talk to the peer:
//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
//!     lightning_net_tokio::setup_inbound(peer_manager, socket);
//!     loop {
//!         let event_handler = |event: Event| {
//!             // Handle the event!
//!         };
//!         channel_manager.await_persistable_update();
//!         channel_manager.process_pending_events(&event_handler);
//!         chain_monitor.process_pending_events(&event_handler);
//!     }
//! }
//! ```
// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
#![deny(private_intra_doc_links)]

#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use bitcoin::secp256k1::PublicKey;

use tokio::net::TcpStream;
use tokio::{io, time};
use tokio::sync::mpsc;
use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};

use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::peer_handler::CustomMessageHandler;
use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
use lightning::util::logger::Logger;

use std::ops::Deref;
use std::task;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use std::hash::Hash;
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);

/// Connection contains all our internal state for a connection - we hold a reference to the
/// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
/// read future (which is returned by schedule_read).
struct Connection {
	writer: Option<io::WriteHalf<TcpStream>>,
	// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
	// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
	// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
	// This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
	// the schedule_read stack.
	//
	// An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
	// runtime with functions templated by the Arc<PeerManager> type, calling
	// write_buffer_space_avail directly from tokio's write wake, however doing so would require
	// more unsafe voodoo than I really feel like writing.
	write_avail: mpsc::Sender<()>,
	// When we are told by rust-lightning to pause read (because we have writes backing up), we do
	// so by setting read_paused. At that point, the read task will stop reading bytes from the
	// socket. To wake it up (without otherwise changing its state), we can push a value into this
	// Sender.
	read_waker: mpsc::Sender<()>,
	read_paused: bool,
	rl_requested_disconnect: bool,
	id: u64,
}
impl Connection {
	async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH>(
		peer_manager: PM,
		mut event_receiver: mpsc::Receiver<()>,
	) where
			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync,
			CMH: Deref + 'static + Send + Sync,
			RMH: Deref + 'static + Send + Sync,
			OMH: Deref + 'static + Send + Sync,
			L: Deref + 'static + Send + Sync,
			UMH: Deref + 'static + Send + Sync,
			CMH::Target: ChannelMessageHandler + Send + Sync,
			RMH::Target: RoutingMessageHandler + Send + Sync,
			OMH::Target: OnionMessageHandler + Send + Sync,
			L::Target: Logger + Send + Sync,
			UMH::Target: CustomMessageHandler + Send + Sync,
	{
		loop {
			if event_receiver.recv().await.is_none() {
				return;
			}
			peer_manager.process_events();
		}
	}
	async fn schedule_read<PM, CMH, RMH, OMH, L, UMH>(
		peer_manager: PM,
		us: Arc<Mutex<Self>>,
		mut reader: io::ReadHalf<TcpStream>,
		mut read_wake_receiver: mpsc::Receiver<()>,
		mut write_avail_receiver: mpsc::Receiver<()>,
	) where
			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
			CMH: Deref + 'static + Send + Sync,
			RMH: Deref + 'static + Send + Sync,
			OMH: Deref + 'static + Send + Sync,
			L: Deref + 'static + Send + Sync,
			UMH: Deref + 'static + Send + Sync,
			CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
			RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
			OMH::Target: OnionMessageHandler + 'static + Send + Sync,
			L::Target: Logger + 'static + Send + Sync,
			UMH::Target: CustomMessageHandler + 'static + Send + Sync,
	{
		// Create a waker to wake up poll_event_process, above
		let (event_waker, event_receiver) = mpsc::channel(1);
		tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
		// 8KB is nice and big but also should never cause any issues with stack overflowing.
		let mut buf = [0; 8192];

		let mut our_descriptor = SocketDescriptor::new(us.clone());
		// An enum describing why we did/are disconnecting:
		enum Disconnect {
			// Rust-Lightning told us to disconnect, either by returning an Err or by calling
			// SocketDescriptor::disconnect_socket.
			// In this case, we do not call peer_manager.socket_disconnected() as Rust-Lightning
			// already knows we're disconnected.
			CloseConnection,
			// The connection was disconnected for some other reason, i.e. because the socket was
			// closed.
			// In this case, we do need to call peer_manager.socket_disconnected() to inform
			// Rust-Lightning that the socket is gone.
			PeerDisconnected,
		}
		let disconnect_type = loop {
			let read_paused = {
				let us_lock = us.lock().unwrap();
				if us_lock.rl_requested_disconnect {
					break Disconnect::CloseConnection;
				}
				us_lock.read_paused
			};
			tokio::select! {
				v = write_avail_receiver.recv() => {
					assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
					if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
						break Disconnect::CloseConnection;
					}
				},
				_ = read_wake_receiver.recv() => {},
				read = reader.read(&mut buf), if !read_paused => match read {
					Ok(0) => break Disconnect::PeerDisconnected,
					Ok(len) => {
						let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
						let mut us_lock = us.lock().unwrap();
						match read_res {
							Ok(pause_read) => {
								if pause_read {
									us_lock.read_paused = true;
								}
							},
							Err(_) => break Disconnect::CloseConnection,
						}
					},
					Err(_) => break Disconnect::PeerDisconnected,
				},
			}
			let _ = event_waker.try_send(());
			// At this point we've processed a message or two, and reset the ping timer for this
			// peer, at least in the "are we still receiving messages" context. If we don't give up
			// our timeslice to another task we may just spin on this peer, starving other peers
			// and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
			// here.
			tokio::task::yield_now().await;
		};
		let writer_option = us.lock().unwrap().writer.take();
		if let Some(mut writer) = writer_option {
			// If the socket is already closed, shutdown() will fail, so just ignore it.
			let _ = writer.shutdown().await;
		}
		if let Disconnect::PeerDisconnected = disconnect_type {
			peer_manager.socket_disconnected(&our_descriptor);
			peer_manager.process_events();
		}
	}
	fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
		// We only ever need a channel of depth 1 here: if we returned a non-full write to the
		// PeerManager, we will eventually get notified that there is room in the socket to write
		// new bytes, which will generate an event. That event will be popped off the queue before
		// we call write_buffer_space_avail, ensuring that we have room to push a new () if, during
		// the write_buffer_space_avail() call, send_data() returns a non-full write.
		let (write_avail, write_receiver) = mpsc::channel(1);
		// Similarly here - our only goal is to make sure the reader wakes up at some point after
		// we shove a value into the channel which comes after we've reset the read_paused bool to
		// false.
		let (read_waker, read_receiver) = mpsc::channel(1);
		stream.set_nonblocking(true).unwrap();
		let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());

		(reader, write_receiver, read_receiver,
			Arc::new(Mutex::new(Self {
				writer: Some(writer), write_avail, read_waker, read_paused: false,
				rl_requested_disconnect: false,
				id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
			})))
	}
}
fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
	match stream.peer_addr() {
		Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
			addr: sockaddr.ip().octets(),
			port: sockaddr.port(),
		}),
		Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
			addr: sockaddr.ip().octets(),
			port: sockaddr.port(),
		}),
		Err(_) => None,
	}
}
/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// accepting an incoming connection.
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
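///
/// A minimal usage sketch (not from the upstream docs; it assumes a `peer_manager` built as in
/// the crate-level example and an already-accepted `std::net::TcpStream` named `socket`):
/// ```ignore
/// // Spawn the returned future; it completes once the peer disconnects.
/// tokio::spawn(lightning_net_tokio::setup_inbound(Arc::clone(&peer_manager), socket));
/// ```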
pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
	peer_manager: PM,
	stream: StdTcpStream,
) -> impl std::future::Future<Output=()> where
		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
		CMH: Deref + 'static + Send + Sync,
		RMH: Deref + 'static + Send + Sync,
		OMH: Deref + 'static + Send + Sync,
		L: Deref + 'static + Send + Sync,
		UMH: Deref + 'static + Send + Sync,
		CMH::Target: ChannelMessageHandler + Send + Sync,
		RMH::Target: RoutingMessageHandler + Send + Sync,
		OMH::Target: OnionMessageHandler + Send + Sync,
		L::Target: Logger + Send + Sync,
		UMH::Target: CustomMessageHandler + Send + Sync,
{
	let remote_addr = get_addr_from_stream(&stream);
	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
	#[cfg(debug_assertions)]
	let last_us = Arc::clone(&us);

	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
	} else {
		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
		// requirements.
		None
	};

	async move {
		if let Some(handle) = handle_opt {
			if let Err(e) = handle.await {
				assert!(e.is_cancelled());
			} else {
				// This is certainly not guaranteed to always be true - the read loop may exit
				// while there are still pending write wakers that need to be woken up after the
				// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
				// keep too many wakers around, this makes sense. The race should be rare (we do
				// some work after shutdown()) and an error would be a major memory leak.
				#[cfg(debug_assertions)]
				assert!(Arc::try_unwrap(last_us).is_ok());
			}
		}
	}
}
/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// making an outbound connection which is expected to be accepted by a peer with the given
/// public key. The relevant processing is set to run free (via tokio::spawn).
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
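///
/// A minimal usage sketch (not from the upstream docs; it assumes a `peer_manager` as in the
/// crate-level example, the peer's `PublicKey` in `their_node_id`, and an already-connected
/// `std::net::TcpStream` named `stream`):
/// ```ignore
/// // Spawn the returned future; it completes once the peer disconnects.
/// tokio::spawn(lightning_net_tokio::setup_outbound(Arc::clone(&peer_manager), their_node_id, stream));
/// ```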
pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
	peer_manager: PM,
	their_node_id: PublicKey,
	stream: StdTcpStream,
) -> impl std::future::Future<Output=()> where
		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
		CMH: Deref + 'static + Send + Sync,
		RMH: Deref + 'static + Send + Sync,
		OMH: Deref + 'static + Send + Sync,
		L: Deref + 'static + Send + Sync,
		UMH: Deref + 'static + Send + Sync,
		CMH::Target: ChannelMessageHandler + Send + Sync,
		RMH::Target: RoutingMessageHandler + Send + Sync,
		OMH::Target: OnionMessageHandler + Send + Sync,
		L::Target: Logger + Send + Sync,
		UMH::Target: CustomMessageHandler + Send + Sync,
{
	let remote_addr = get_addr_from_stream(&stream);
	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
	#[cfg(debug_assertions)]
	let last_us = Arc::clone(&us);
	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
		Some(tokio::spawn(async move {
			// We should essentially always have enough room in a TCP socket buffer to send the
			// initial 10s of bytes. However, tokio running in single-threaded mode will always
			// fail writes and wake us back up later to write. Thus, we handle a single
			// std::task::Poll::Pending but still expect to write the full set of bytes at once
			// and use a relatively tight timeout.
			if let Ok(Ok(())) = tokio::time::timeout(Duration::from_millis(100), async {
				loop {
					match SocketDescriptor::new(us.clone()).send_data(&initial_send, true) {
						v if v == initial_send.len() => break Ok(()),
						0 => {
							write_receiver.recv().await;
							// In theory we could check whether we've been instructed to disconnect
							// the peer here, but it's OK to just skip it - we'll check for it in
							// schedule_read prior to any relevant calls into RL.
						},
						_ => {
							eprintln!("Failed to write first full message to socket!");
							peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
							break Err(());
						}
					}
				}
			}).await {
				Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver).await;
			}
		}))
	} else {
		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
		// requirements.
		None
	};

	async move {
		if let Some(handle) = handle_opt {
			if let Err(e) = handle.await {
				assert!(e.is_cancelled());
			} else {
				// This is certainly not guaranteed to always be true - the read loop may exit
				// while there are still pending write wakers that need to be woken up after the
				// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
				// keep too many wakers around, this makes sense. The race should be rare (we do
				// some work after shutdown()) and an error would be a major memory leak.
				#[cfg(debug_assertions)]
				assert!(Arc::try_unwrap(last_us).is_ok());
			}
		}
	}
}
/// Process incoming messages and feed outgoing messages on a new connection made to the given
/// socket address which is expected to be accepted by a peer with the given public key (by
/// scheduling futures with tokio::spawn).
///
/// Shorthand for TcpStream::connect(addr) with a timeout followed by setup_outbound().
///
/// Returns a future (as the fn is async) which needs to be polled to complete the connection and
/// connection setup. That future then returns a future which will complete when the peer is
/// disconnected and associated handling futures are freed. However, because all processing in said
/// futures is done via tokio::spawn, you do not need to poll the second future in order to make
/// progress.
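///
/// A minimal usage sketch (not from the upstream docs; it assumes a `peer_manager` as in the
/// crate-level example, the peer's `PublicKey` in `their_node_id`, and a `SocketAddr` in `addr`):
/// ```ignore
/// // `None` means the TCP connection failed or timed out before setup could begin.
/// if let Some(disconnect_fut) = lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), their_node_id, addr).await {
///     tokio::spawn(disconnect_fut);
/// }
/// ```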
pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH>(
	peer_manager: PM,
	their_node_id: PublicKey,
	addr: SocketAddr,
) -> Option<impl std::future::Future<Output=()>> where
		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
		CMH: Deref + 'static + Send + Sync,
		RMH: Deref + 'static + Send + Sync,
		OMH: Deref + 'static + Send + Sync,
		L: Deref + 'static + Send + Sync,
		UMH: Deref + 'static + Send + Sync,
		CMH::Target: ChannelMessageHandler + Send + Sync,
		RMH::Target: RoutingMessageHandler + Send + Sync,
		OMH::Target: OnionMessageHandler + Send + Sync,
		L::Target: Logger + Send + Sync,
		UMH::Target: CustomMessageHandler + Send + Sync,
{
	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
		Some(setup_outbound(peer_manager, their_node_id, stream))
	} else { None }
}
const SOCK_WAKER_VTABLE: task::RawWakerVTable =
	task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);

fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
	write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
}
// When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
// failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
// sending thread may have already gone away due to a socket close, in which case there's nothing
// to wake up anyway.
fn wake_socket_waker(orig_ptr: *const ()) {
	let sender = unsafe { &mut *(orig_ptr as *mut mpsc::Sender<()>) };
	let _ = sender.try_send(());
	drop_socket_waker(orig_ptr);
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
	let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
	let sender = unsafe { (*sender_ptr).clone() };
	let _ = sender.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
	let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
	// _orig_box is now dropped
}
fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
	let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
	let new_ptr = new_box as *const mpsc::Sender<()>;
	task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}
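
// A brief summary of the ownership contract implemented above (added commentary, not part of the
// upstream comments): every RawWaker's data pointer is a leaked Box<mpsc::Sender<()>>.
// clone_socket_waker allocates a fresh boxed clone of the Sender, wake_socket_waker signals and
// then consumes (frees) its box via drop_socket_waker, wake_socket_waker_by_ref signals through a
// temporary clone without taking ownership, and drop_socket_waker frees the box without signalling.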
/// The SocketDescriptor used to refer to sockets by a PeerManager. This is pub only as it is a
/// type parameter of the PeerManager.
pub struct SocketDescriptor {
	conn: Arc<Mutex<Connection>>,
	id: u64,
}
impl SocketDescriptor {
	fn new(conn: Arc<Mutex<Connection>>) -> Self {
		let id = conn.lock().unwrap().id;
		Self { conn, id }
	}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
		// To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
		// writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
		// a SocketDescriptor in it which can wake up the write_avail Sender, waking up the
		// processing future which will call write_buffer_space_avail and we'll end up back here.
		let mut us = self.conn.lock().unwrap();
		if us.writer.is_none() {
			// The writer gets take()n when it is time to shut down, so just fast-return 0 here.
			return 0;
		}

		if resume_read && us.read_paused {
			// The schedule_read future may go to lock up but end up getting woken up by there
			// being more room in the write buffer, dropping the other end of this Sender
			// before we get here, so we ignore any failures to wake it up.
			us.read_paused = false;
			let _ = us.read_waker.try_send(());
		}
		if data.is_empty() { return 0; }
		let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
		let mut ctx = task::Context::from_waker(&waker);
		let mut written_len = 0;
		loop {
			match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
				task::Poll::Ready(Ok(res)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (because it should be a Poll::Pending
					// instead):
					assert_ne!(res, 0);
					written_len += res;
					if written_len == data.len() { return written_len; }
				},
				task::Poll::Ready(Err(e)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (because it should be a Poll::Pending
					// instead):
					assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
					// Probably we've already been closed; just return what we have and let the
					// read thread handle the closing logic.
					return written_len;
				},
				task::Poll::Pending => {
					// We're queued up for a write event now, but we need to make sure we also
					// pause read given we're now waiting on the remote end to ACK (and in
					// accordance with the send_data() docs).
					us.read_paused = true;
					// Further, to avoid any current pending read causing a `read_event` call, wake
					// up the read_waker and restart its loop.
					let _ = us.read_waker.try_send(());
					return written_len;
				},
			}
		}
	}
	fn disconnect_socket(&mut self) {
		let mut us = self.conn.lock().unwrap();
		us.rl_requested_disconnect = true;
		// Wake up the sending thread, assuming it is still alive
		let _ = us.write_avail.try_send(());
	}
}
impl Clone for SocketDescriptor {
	fn clone(&self) -> Self {
		Self {
			conn: Arc::clone(&self.conn),
			id: self.id,
		}
	}
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
	fn eq(&self, o: &Self) -> bool {
		self.id == o.id
	}
}
impl Hash for SocketDescriptor {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		self.id.hash(state)
	}
}
#[cfg(test)]
mod tests {
	use lightning::ln::features::*;
	use lightning::ln::msgs::*;
	use lightning::ln::peer_handler::{MessageHandler, PeerManager};
	use lightning::ln::features::NodeFeatures;
	use lightning::util::events::*;
	use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};

	use tokio::sync::mpsc;

	use std::mem;
	use std::sync::atomic::{AtomicBool, Ordering};
	use std::sync::{Arc, Mutex};
	use std::time::Duration;

	pub struct TestLogger();
	impl lightning::util::logger::Logger for TestLogger {
		fn log(&self, record: &lightning::util::logger::Record) {
			println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
		}
	}
	struct MsgHandler {
		expected_pubkey: PublicKey,
		pubkey_connected: mpsc::Sender<()>,
		pubkey_disconnected: mpsc::Sender<()>,
		disconnected_flag: AtomicBool,
		msg_events: Mutex<Vec<MessageSendEvent>>,
	}
	impl RoutingMessageHandler for MsgHandler {
		fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
		fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
		fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
		fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
		fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> { None }
		fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
		fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
		fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
		fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
		fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
		fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
		fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
	}
	impl ChannelMessageHandler for MsgHandler {
		fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &OpenChannel) {}
		fn handle_accept_channel(&self, _their_node_id: &PublicKey, _msg: &AcceptChannel) {}
		fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
		fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
		fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {}
		fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {}
		fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
		fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
		fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
		fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailHTLC) {}
		fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailMalformedHTLC) {}
		fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &CommitmentSigned) {}
		fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {}
		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
		fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
		fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
			if *their_node_id == self.expected_pubkey {
				self.disconnected_flag.store(true, Ordering::SeqCst);
				self.pubkey_disconnected.clone().try_send(()).unwrap();
			}
		}
		fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
			if *their_node_id == self.expected_pubkey {
				self.pubkey_connected.clone().try_send(()).unwrap();
			}
			Ok(())
		}
		fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
		fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
		fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
		fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
	}
	impl MessageSendEventsProvider for MsgHandler {
		fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
			let mut ret = Vec::new();
			mem::swap(&mut *self.msg_events.lock().unwrap(), &mut ret);
			ret
		}
	}
	fn make_tcp_connection() -> (std::net::TcpStream, std::net::TcpStream) {
		if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
			(std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:19735") {
			(std::net::TcpStream::connect("127.0.0.1:19735").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9997") {
			(std::net::TcpStream::connect("127.0.0.1:9997").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9998") {
			(std::net::TcpStream::connect("127.0.0.1:9998").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
			(std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
			(std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
		} else { panic!("Failed to bind to v4 localhost on common ports"); }
	}
	async fn do_basic_connection_test() {
		let secp_ctx = Secp256k1::new();
		let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
		let b_key = SecretKey::from_slice(&[1; 32]).unwrap();
		let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key);
		let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);

		let (a_connected_sender, mut a_connected) = mpsc::channel(1);
		let (a_disconnected_sender, mut a_disconnected) = mpsc::channel(1);
		let a_handler = Arc::new(MsgHandler {
			expected_pubkey: b_pub,
			pubkey_connected: a_connected_sender,
			pubkey_disconnected: a_disconnected_sender,
			disconnected_flag: AtomicBool::new(false),
			msg_events: Mutex::new(Vec::new()),
		});
		let a_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::clone(&a_handler),
			route_handler: Arc::clone(&a_handler),
			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
		}, a_key.clone(), 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
		let b_handler = Arc::new(MsgHandler {
			expected_pubkey: a_pub,
			pubkey_connected: b_connected_sender,
			pubkey_disconnected: b_disconnected_sender,
			disconnected_flag: AtomicBool::new(false),
			msg_events: Mutex::new(Vec::new()),
		});
		let b_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::clone(&b_handler),
			route_handler: Arc::clone(&b_handler),
			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
		}, b_key.clone(), 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

		// We bind on localhost, hoping the environment is properly configured with a local
		// address. This may not always be the case in containers and the like, so if this test is
		// failing for you, check that you have a loopback interface and it is configured with
		// 127.0.0.1.
		let (conn_a, conn_b) = make_tcp_connection();

		let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
		let fut_b = super::setup_inbound(b_manager, conn_b);

		tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap();
		tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();

		a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
			node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None }
		});
		assert!(!a_handler.disconnected_flag.load(Ordering::SeqCst));
		assert!(!b_handler.disconnected_flag.load(Ordering::SeqCst));

		a_manager.process_events();
		tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap();
		tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap();
		assert!(a_handler.disconnected_flag.load(Ordering::SeqCst));
		assert!(b_handler.disconnected_flag.load(Ordering::SeqCst));

		fut_a.await;
		fut_b.await;
	}
	#[tokio::test(flavor = "multi_thread")]
	async fn basic_threaded_connection_test() {
		do_basic_connection_test().await;
	}

	#[tokio::test]
	async fn basic_unthreaded_connection_test() {
		do_basic_connection_test().await;
	}
	async fn race_disconnect_accept() {
		// Previously, if we handed an already-disconnected socket to `setup_inbound` we'd panic.
		// This attempts to find other similar races by opening connections and shutting them down
		// while connecting. Sadly in testing this did *not* reproduce the previous issue.
		let secp_ctx = Secp256k1::new();
		let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
		let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
		let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);

		let a_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
			route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
		}, a_key, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));

		// Make two connections, one for an inbound and one for an outbound connection
		let conn_a = {
			let (conn_a, _) = make_tcp_connection();
			conn_a
		};
		let conn_b = {
			let (_, conn_b) = make_tcp_connection();
			conn_b
		};

		// Call connection setup inside new tokio tasks.
		let manager_reference = Arc::clone(&a_manager);
		tokio::spawn(async move {
			super::setup_inbound(manager_reference, conn_a).await
		});
		tokio::spawn(async move {
			super::setup_outbound(a_manager, b_pub, conn_b).await
		});
	}
	#[tokio::test(flavor = "multi_thread")]
	async fn threaded_race_disconnect_accept() {
		race_disconnect_accept().await;
	}

	#[tokio::test]
	async fn unthreaded_race_disconnect_accept() {
		race_disconnect_accept().await;
	}
}