//! A socket handling library for those running in Tokio environments who wish to use
//! rust-lightning with native TcpStreams.
//!
//! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
//! TcpStream and a reference to a PeerManager and the rest is handled", except for the
//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see below.
//!
//! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use
//! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor.
//!
//! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see
//! their individual docs for more. All three take a
//! [mpsc::Sender<()>](../tokio/sync/mpsc/struct.Sender.html) which is sent into every time
//! something occurs which may result in lightning [Events](../lightning/util/events/enum.Event.html).
//! The call site should, thus, look something like this:
//! use tokio::sync::mpsc;
//! use tokio::net::TcpStream;
//! use secp256k1::key::PublicKey;
//! use lightning::util::events::EventsProvider;
//! use std::net::SocketAddr;
//! use std::sync::Arc;
//!
//! // Define concrete types for our high-level objects:
//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface;
//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator;
//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>>;
//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChannelMonitor, TxBroadcaster, FeeEstimator>;
//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChannelMonitor, TxBroadcaster, FeeEstimator>;
//! // Connect to node with pubkey their_node_id at addr:
//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
//!     let (sender, mut receiver) = mpsc::channel(2);
//!     lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await;
//!     receiver.recv().await;
//!     for _event in channel_manager.get_and_clear_pending_events().drain(..) {
//!         // Handle the event!
//!     }
//!     for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
//!         // Handle the event!
//!     }
//! }
//! // Begin reading from a newly accepted socket and talk to the peer:
//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
//!     let (sender, mut receiver) = mpsc::channel(2);
//!     lightning_net_tokio::setup_inbound(peer_manager, sender, socket);
//!     receiver.recv().await;
//!     for _event in channel_manager.get_and_clear_pending_events().drain(..) {
//!         // Handle the event!
//!     }
//!     for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
//!         // Handle the event!
//!     }
//! }
use secp256k1::key::PublicKey;

use tokio::net::TcpStream;
use tokio::{io, time};
use tokio::sync::mpsc;
use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};

use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::msgs::ChannelMessageHandler;

use std::task;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex, MutexGuard};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use std::hash::Hash;
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);

/// Connection contains all our internal state for a connection - we hold a reference to the
/// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
/// read future (which is returned by schedule_read).
struct Connection {
	writer: Option<io::WriteHalf<TcpStream>>,
	event_notify: mpsc::Sender<()>,
	// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
	// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
	// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
	// This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
	// the schedule_read stack.
	//
	// An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
	// runtime with functions templated by the Arc<PeerManager> type, calling
	// write_buffer_space_avail directly from tokio's write wake, however doing so would require
	// more unsafe voodoo than I really feel like writing.
	write_avail: mpsc::Sender<()>,
	// When we are told by rust-lightning to pause read (because we have writes backing up), we do
	// so by setting read_paused. At that point, the read task will stop reading bytes from the
	// socket. To wake it up (without otherwise changing its state), we can push a value into this
	// Sender.
	read_waker: mpsc::Sender<()>,
	read_paused: bool,
	rl_requested_disconnect: bool,
	id: u64,
}
impl Connection {
	fn event_trigger(us: &mut MutexGuard<Self>) {
		match us.event_notify.try_send(()) {
			Err(mpsc::error::TrySendError::Full(_)) => {
				// Ignore full errors as we just need the user to poll after this point, so if they
				// haven't received the last send yet, it doesn't matter.
	async fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) {
		let peer_manager_ref = peer_manager.clone();
		// 8KB is nice and big but also should never cause any issues with stack overflowing.
		let mut buf = [0; 8192];

		let mut our_descriptor = SocketDescriptor::new(us.clone());
		// An enum describing why we did/are disconnecting:
		enum Disconnect {
			// Rust-Lightning told us to disconnect, either by returning an Err or by calling
			// SocketDescriptor::disconnect_socket.
			// In this case, we do not call peer_manager.socket_disconnected() as Rust-Lightning
			// already knows we're disconnected.
			CloseConnection,
			// The connection was disconnected for some other reason, ie because the socket was
			// closed.
			// In this case, we do need to call peer_manager.socket_disconnected() to inform
			// Rust-Lightning that the socket is gone.
			PeerDisconnected,
		}
		let disconnect_type = loop {
			macro_rules! shutdown_socket {
				($err: expr, $need_disconnect: expr) => { {
					println!("Disconnecting peer due to {}!", $err);
					break $need_disconnect;
			let read_paused = us.lock().unwrap().read_paused;
			tokio::select! {
				v = write_avail_receiver.recv() => {
					assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
					if us.lock().unwrap().rl_requested_disconnect {
						shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
					}
					if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
						shutdown_socket!(e, Disconnect::CloseConnection);
					}
				},
				_ = read_wake_receiver.recv() => {},
				read = reader.read(&mut buf), if !read_paused => match read {
					Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected),
					Ok(len) => {
						if us.lock().unwrap().rl_requested_disconnect {
							shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
						}
						let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
						match read_res {
							Ok(pause_read) => {
								let mut us_lock = us.lock().unwrap();
								if pause_read {
									us_lock.read_paused = true;
								}
								Self::event_trigger(&mut us_lock);
							},
							Err(e) => shutdown_socket!(e, Disconnect::CloseConnection),
						}
					},
					Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected),
		let writer_option = us.lock().unwrap().writer.take();
		if let Some(mut writer) = writer_option {
			// If the socket is already closed, shutdown() will fail, so just ignore it.
			let _ = writer.shutdown().await;
		}
		if let Disconnect::PeerDisconnected = disconnect_type {
			peer_manager_ref.socket_disconnected(&our_descriptor);
			Self::event_trigger(&mut us.lock().unwrap());
	fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
		// We only ever need a channel of depth 1 here: if we returned a non-full write to the
		// PeerManager, we will eventually get notified that there is room in the socket to write
		// new bytes, which will generate an event. That event will be popped off the queue before
		// we call write_buffer_space_avail, ensuring that we have room to push a new () if, during
		// the write_buffer_space_avail() call, send_data() returns a non-full write.
		let (write_avail, write_receiver) = mpsc::channel(1);
		// Similarly here - our only goal is to make sure the reader wakes up at some point after
		// we shove a value into the channel which comes after we've reset the read_paused bool to
		// false.
		let (read_waker, read_receiver) = mpsc::channel(1);
		let (reader, writer) = io::split(stream);

		(reader, write_receiver, read_receiver,
			Arc::new(Mutex::new(Self {
				writer: Some(writer), event_notify, write_avail, read_waker,
				read_paused: false, rl_requested_disconnect: false,
				id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// accepting an incoming connection.
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. Because all processing futures are spawned with tokio::spawn, however, you
/// do not need to poll the provided future in order to make progress.
///
/// See the module-level documentation for how to handle the event_notify mpsc::Sender.
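///
/// A minimal usage sketch (illustrative only; `peer_manager`, `event_notify` and an already
/// accepted `stream` are assumed to exist, with types as in the module-level example):
/// ```ignore
/// let disconnect_fut = lightning_net_tokio::setup_inbound(peer_manager, event_notify, stream);
/// // Awaiting the returned future is optional - it only resolves once the peer disconnects
/// // and its handling futures have been freed; processing is already spawned internally.
/// tokio::spawn(disconnect_fut);
/// ```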
pub fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) -> impl std::future::Future<Output=()> {
	let (reader, write_receiver, read_receiver, us) = Connection::new(event_notify, stream);
	#[cfg(debug_assertions)]
	let last_us = Arc::clone(&us);

	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
	} else {
		// Note that we will skip socket_disconnected here, in accordance with the PeerManager

	if let Some(handle) = handle_opt {
		if let Err(e) = handle.await {
			assert!(e.is_cancelled());

			// This is certainly not guaranteed to always be true - the read loop may exit
			// while there are still pending write wakers that need to be woken up after the
			// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
			// keep too many wakers around, this makes sense. The race should be rare (we do
			// some work after shutdown()) and an error would be a major memory leak.
			#[cfg(debug_assertions)]
			assert!(Arc::try_unwrap(last_us).is_ok());
/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// making an outbound connection which is expected to be accepted by a peer with the given
/// public key. The relevant processing is set to run free (via tokio::spawn).
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. Because all processing futures are spawned with tokio::spawn, however, you
/// do not need to poll the provided future in order to make progress.
///
/// See the module-level documentation for how to handle the event_notify mpsc::Sender.
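///
/// A minimal usage sketch (illustrative only; `peer_manager`, `event_notify`, `their_node_id`
/// and an already connected `stream` are assumed to exist, with types as in the module-level
/// example):
/// ```ignore
/// let disconnect_fut = lightning_net_tokio::setup_outbound(peer_manager, event_notify, their_node_id, stream);
/// // As with setup_inbound, awaiting this future is optional; it only tells you when the peer
/// // has disconnected, since the actual socket handling is spawned internally.
/// tokio::spawn(disconnect_fut);
/// ```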
pub fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) -> impl std::future::Future<Output=()> {
	let (reader, write_receiver, read_receiver, us) = Connection::new(event_notify, stream);
	#[cfg(debug_assertions)]
	let last_us = Arc::clone(&us);

	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
		Some(tokio::spawn(async move {
			if SocketDescriptor::new(us.clone()).send_data(&initial_send, true) != initial_send.len() {
				// We should essentially always have enough room in a TCP socket buffer to send the
				// initial 10s of bytes; if not, just give up as hopeless.
				eprintln!("Failed to write first full message to socket!");
				peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
			} else {
				Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver).await;
			}
		}))
	} else {
		// Note that we will skip socket_disconnected here, in accordance with the PeerManager

	if let Some(handle) = handle_opt {
		if let Err(e) = handle.await {
			assert!(e.is_cancelled());

			// This is certainly not guaranteed to always be true - the read loop may exit
			// while there are still pending write wakers that need to be woken up after the
			// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
			// keep too many wakers around, this makes sense. The race should be rare (we do
			// some work after shutdown()) and an error would be a major memory leak.
			#[cfg(debug_assertions)]
			assert!(Arc::try_unwrap(last_us).is_ok());
/// Process incoming messages and feed outgoing messages on a new connection made to the given
/// socket address which is expected to be accepted by a peer with the given public key (by
/// scheduling futures with tokio::spawn).
///
/// Shorthand for TcpStream::connect(addr) with a timeout followed by setup_outbound().
///
/// Returns a future (as the fn is async) which needs to be polled to complete the connection and
/// connection setup. That future then returns a future which will complete when the peer is
/// disconnected and associated handling futures are freed. Because all processing in said futures
/// is spawned with tokio::spawn, you do not need to poll the second future in order to make
/// progress.
///
/// See the module-level documentation for how to handle the event_notify mpsc::Sender.
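///
/// A minimal usage sketch (illustrative only; names as in the module-level example):
/// ```ignore
/// match lightning_net_tokio::connect_outbound(peer_manager, event_notify, their_node_id, addr).await {
///     Some(disconnect_fut) => {
///         // Connected within the 10-second timeout; await disconnect_fut only if you want to
///         // know when the connection is torn down.
///         tokio::spawn(disconnect_fut);
///     },
///     None => {
///         // The TCP connection failed or timed out.
///     },
/// }
/// ```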
pub async fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> {
	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), TcpStream::connect(&addr)).await {
		Some(setup_outbound(peer_manager, event_notify, their_node_id, stream))
const SOCK_WAKER_VTABLE: task::RawWakerVTable =
	task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);

fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
	write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
}
// When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
// failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
// sending thread may have already gone away due to a socket close, in which case there's nothing
// to wake up anyway.
fn wake_socket_waker(orig_ptr: *const ()) {
	let sender = unsafe { &mut *(orig_ptr as *mut mpsc::Sender<()>) };
	let _ = sender.try_send(());
	drop_socket_waker(orig_ptr);
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
	let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
	let mut sender = unsafe { (*sender_ptr).clone() };
	let _ = sender.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
	let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
	// _orig_box is now dropped
}
fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
	let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
	let new_ptr = new_box as *const mpsc::Sender<()>;
	task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}
/// The SocketDescriptor used to refer to sockets by a PeerHandler. This is pub only as it is a
/// type in the template of PeerHandler.
pub struct SocketDescriptor {
	conn: Arc<Mutex<Connection>>,
	id: u64,
}

impl SocketDescriptor {
	fn new(conn: Arc<Mutex<Connection>>) -> Self {
		let id = conn.lock().unwrap().id;
impl peer_handler::SocketDescriptor for SocketDescriptor {
	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
		// To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
		// writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
		// a SocketDescriptor in it which can wake up the write_avail Sender, waking up the
		// processing future which will call write_buffer_space_avail and we'll end up back here.
		let mut us = self.conn.lock().unwrap();
		if us.writer.is_none() {
			// The writer gets take()n when it is time to shut down, so just fast-return 0 here.
			return 0;
		}

		if resume_read && us.read_paused {
			// The schedule_read future may go to lock up but end up getting woken up by there
			// being more room in the write buffer, dropping the other end of this Sender
			// before we get here, so we ignore any failures to wake it up.
			us.read_paused = false;
			let _ = us.read_waker.try_send(());
		}
		if data.is_empty() { return 0; }
		let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
		let mut ctx = task::Context::from_waker(&waker);
		let mut written_len = 0;
		loop {
			match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
				task::Poll::Ready(Ok(res)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (cause it should be a Poll::Pending
					// instead):
					written_len += res;
					if written_len == data.len() { return written_len; }
				},
				task::Poll::Ready(Err(e)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (cause it should be a Poll::Pending
					// instead):
					assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
					// Probably we've already been closed, just return what we have and let the
					// read thread handle closing logic.
					return written_len;
				},
				task::Poll::Pending => {
					// We're queued up for a write event now, but we need to make sure we also
					// pause read given we're now waiting on the remote end to ACK (and in
					// accordance with the send_data() docs).
					us.read_paused = true;
	fn disconnect_socket(&mut self) {
		let mut us = self.conn.lock().unwrap();
		us.rl_requested_disconnect = true;
		us.read_paused = true;
		// Wake up the sending thread, assuming it is still alive
		let _ = us.write_avail.try_send(());
		// TODO: There's a race where we don't meet the requirements of disconnect_socket if the
		// read task is about to call a PeerManager function (eg read_event or write_event).
		// Ideally we need to release the us lock and block until we have confirmation from the
		// read task that it has broken out of its main loop.
impl Clone for SocketDescriptor {
	fn clone(&self) -> Self {
		Self { conn: Arc::clone(&self.conn), id: self.id }
	}
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
	fn eq(&self, o: &Self) -> bool {
		self.id == o.id
	}
}
impl Hash for SocketDescriptor {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		self.id.hash(state);
	}
}
#[cfg(test)]
mod tests {
	use lightning::ln::features::*;
	use lightning::ln::msgs::*;
	use lightning::ln::peer_handler::{MessageHandler, PeerManager};
	use lightning::util::events::*;
	use secp256k1::{Secp256k1, SecretKey, PublicKey};

	use tokio::sync::mpsc;

	use std::mem;
	use std::sync::{Arc, Mutex};
	use std::time::Duration;
	pub struct TestLogger();
	impl lightning::util::logger::Logger for TestLogger {
		fn log(&self, record: &lightning::util::logger::Record) {
			println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
		}
	}

	struct MsgHandler {
		expected_pubkey: PublicKey,
		pubkey_connected: mpsc::Sender<()>,
		pubkey_disconnected: mpsc::Sender<()>,
		msg_events: Mutex<Vec<MessageSendEvent>>,
	}
	impl RoutingMessageHandler for MsgHandler {
		fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
		fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
		fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
		fn handle_htlc_fail_channel_update(&self, _update: &HTLCFailChannelUpdate) { }
		fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, ChannelUpdate, ChannelUpdate)> { Vec::new() }
		fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
		fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { false }
	}
	impl ChannelMessageHandler for MsgHandler {
		fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &OpenChannel) {}
		fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &AcceptChannel) {}
		fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
		fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
		fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {}
		fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {}
		fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
		fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
		fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
		fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailHTLC) {}
		fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFailMalformedHTLC) {}
		fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &CommitmentSigned) {}
		fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {}
		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
		fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
			if *their_node_id == self.expected_pubkey {
				self.pubkey_disconnected.clone().try_send(()).unwrap();
			}
		}
		fn peer_connected(&self, their_node_id: &PublicKey, _msg: &Init) {
			if *their_node_id == self.expected_pubkey {
				self.pubkey_connected.clone().try_send(()).unwrap();
			}
		}
		fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
		fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
	}
	impl MessageSendEventsProvider for MsgHandler {
		fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
			let mut ret = Vec::new();
			mem::swap(&mut *self.msg_events.lock().unwrap(), &mut ret);
			ret
		}
	}
	#[tokio::test(threaded_scheduler)]
	async fn basic_connection_test() {
		let secp_ctx = Secp256k1::new();
		let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
		let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
		let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key);
		let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
		let (a_connected_sender, mut a_connected) = mpsc::channel(1);
		let (a_disconnected_sender, mut a_disconnected) = mpsc::channel(1);
		let a_handler = Arc::new(MsgHandler {
			expected_pubkey: b_pub,
			pubkey_connected: a_connected_sender,
			pubkey_disconnected: a_disconnected_sender,
			msg_events: Mutex::new(Vec::new()),
		});
		let a_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::clone(&a_handler),
			route_handler: Arc::clone(&a_handler) as Arc<dyn RoutingMessageHandler>,
		}, a_key.clone(), &[1; 32], Arc::new(TestLogger())));
		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
		let b_handler = Arc::new(MsgHandler {
			expected_pubkey: a_pub,
			pubkey_connected: b_connected_sender,
			pubkey_disconnected: b_disconnected_sender,
			msg_events: Mutex::new(Vec::new()),
		});
		let b_manager = Arc::new(PeerManager::new(MessageHandler {
			chan_handler: Arc::clone(&b_handler),
			route_handler: Arc::clone(&b_handler) as Arc<dyn RoutingMessageHandler>,
		}, b_key.clone(), &[2; 32], Arc::new(TestLogger())));
		// We bind on localhost, hoping the environment is properly configured with a local
		// address. This may not always be the case in containers and the like, so if this test is
		// failing for you check that you have a loopback interface and it is configured with
		// 127.0.0.1.
		let (conn_a, conn_b) = if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
			(std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
			(std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
		} else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
			(std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
		} else { panic!("Failed to bind to v4 localhost on common ports"); };

		let (sender, _receiver) = mpsc::channel(2);
		let fut_a = super::setup_outbound(Arc::clone(&a_manager), sender.clone(), b_pub, tokio::net::TcpStream::from_std(conn_a).unwrap());
		let fut_b = super::setup_inbound(b_manager, sender, tokio::net::TcpStream::from_std(conn_b).unwrap());
		tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap();
		tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();

		a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
			node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None }
		});
		assert!(a_disconnected.try_recv().is_err());
		assert!(b_disconnected.try_recv().is_err());

		a_manager.process_events();
		tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap();
		tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap();