Stop printing to stderr in lightning-net-tokio for disconnections
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index e460df25e54d192e92548feb5b6053715798fd27..d2ee100281eed222fd0a172ab227606318e8aaca 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -1,64 +1,76 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! A socket handling library for those running in Tokio environments who wish to use
 //! rust-lightning with native TcpStreams.
 //!
 //! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
 //! TcpStream and a reference to a PeerManager and the rest is handled", except for the
-//! [Event](../lightning/util/events/enum.Event.html) handlng mechanism, see below.
+//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see example below.
 //!
 //! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use
 //! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor.
 //!
-//! Three methods are exposed to register a new connection for handling in tokio::spawn calls, see
-//! their individual docs for more. All three take a
-//! [mpsc::Sender<()>](../tokio/sync/mpsc/struct.Sender.html) which is sent into every time
-//! something occurs which may result in lightning [Events](../lightning/util/events/enum.Event.html).
-//! The call site should, thus, look something like this:
+//! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see
+//! their individual docs for details.
+//!
+//! # Example
 //! ```
-//! use tokio::sync::mpsc;
-//! use tokio::net::TcpStream;
+//! use std::net::TcpStream;
 //! use bitcoin::secp256k1::key::PublicKey;
-//! use lightning::util::events::EventsProvider;
+//! use lightning::util::events::{Event, EventHandler, EventsProvider};
 //! use std::net::SocketAddr;
 //! use std::sync::Arc;
 //!
 //! // Define concrete types for our high-level objects:
-//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface;
-//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator;
-//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>>;
-//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChannelMonitor, TxBroadcaster, FeeEstimator>;
-//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChannelMonitor, TxBroadcaster, FeeEstimator>;
+//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
+//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
+//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
+//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
+//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
+//! type DataPersister = dyn lightning::chain::channelmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
+//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
+//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
+//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
 //!
 //! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
-//!     let (sender, mut receiver) = mpsc::channel(2);
-//!     lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await;
-//!     loop {
-//!         receiver.recv().await;
-//!         for _event in channel_manager.get_and_clear_pending_events().drain(..) {
-//!             // Handle the event!
-//!         }
-//!         for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
-//!             // Handle the event!
-//!         }
-//!     }
+//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
+//!    lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
+//!    loop {
+//!            channel_manager.await_persistable_update();
+//!            channel_manager.process_pending_events(&|event| {
+//!                    // Handle the event!
+//!            });
+//!            chain_monitor.process_pending_events(&|event| {
+//!                    // Handle the event!
+//!            });
+//!    }
 //! }
 //!
 //! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
-//!     let (sender, mut receiver) = mpsc::channel(2);
-//!     lightning_net_tokio::setup_inbound(peer_manager, sender, socket);
-//!     loop {
-//!         receiver.recv().await;
-//!         for _event in channel_manager.get_and_clear_pending_events().drain(..) {
-//!             // Handle the event!
-//!         }
-//!         for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
-//!             // Handle the event!
-//!         }
-//!     }
+//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
+//!    lightning_net_tokio::setup_inbound(peer_manager, socket);
+//!    loop {
+//!            channel_manager.await_persistable_update();
+//!            channel_manager.process_pending_events(&|event| {
+//!                    // Handle the event!
+//!            });
+//!            chain_monitor.process_pending_events(&|event| {
+//!                    // Handle the event!
+//!            });
+//!    }
 //! }
 //! ```
 
+#![deny(broken_intra_doc_links)]
+#![deny(missing_docs)]
+
 use bitcoin::secp256k1::key::PublicKey;
 
 use tokio::net::TcpStream;
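
With the `event_notify` sender gone, nothing extra is threaded through the setup calls any more: connections are handed over as plain `std::net::TcpStream`s and the library now calls `process_events()` itself after reads and writes. A minimal sketch (not part of this patch) of an inbound accept loop against the new API, assuming the `PeerManager` alias from the module docs above:

```rust
// Sketch only: accept TCP connections and hand each one to lightning-net-tokio.
async fn listen(peer_manager: PeerManager) {
	let listener = tokio::net::TcpListener::bind("0.0.0.0:9735").await.unwrap();
	loop {
		let (tokio_stream, _addr) = listener.accept().await.unwrap();
		// setup_inbound now takes a std::net::TcpStream, so convert back via
		// into_std() (the same conversion connect_outbound performs below).
		let stream = tokio_stream.into_std().unwrap();
		// The returned future completes on disconnect; spawning it is enough,
		// no polling or event_notify plumbing is required.
		tokio::spawn(lightning_net_tokio::setup_inbound(peer_manager.clone(), stream));
	}
}
```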
@@ -68,11 +80,13 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
-use lightning::ln::msgs::ChannelMessageHandler;
+use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::util::logger::Logger;
 
-use std::{task, thread};
+use std::task;
 use std::net::SocketAddr;
-use std::sync::{Arc, Mutex, MutexGuard};
+use std::net::TcpStream as StdTcpStream;
+use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::Duration;
 use std::hash::Hash;
@@ -84,10 +98,9 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 /// read future (which is returned by schedule_read).
 struct Connection {
        writer: Option<io::WriteHalf<TcpStream>>,
-       event_notify: mpsc::Sender<()>,
        // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
        // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
-       // between being woken up with write-ready and calling PeerManager::write_buffer_spce_avail.
+       // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
        // This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
        // the schedule_read stack.
        //
@@ -101,28 +114,15 @@ struct Connection {
        // socket. To wake it up (without otherwise changing its state), we can push a value into this
        // Sender.
        read_waker: mpsc::Sender<()>,
-       // When we are told by rust-lightning to disconnect, we can't return to rust-lightning until we
-       // are sure we won't call any more read/write PeerManager functions with the same connection.
-       // This is set to true if we're in such a condition (with disconnect checked before with the
-       // top-level mutex held) and false when we can return.
-       block_disconnect_socket: bool,
        read_paused: bool,
        rl_requested_disconnect: bool,
        id: u64,
 }
 impl Connection {
-       fn event_trigger(us: &mut MutexGuard<Self>) {
-               match us.event_notify.try_send(()) {
-                       Ok(_) => {},
-                       Err(mpsc::error::TrySendError::Full(_)) => {
-                               // Ignore full errors as we just need the user to poll after this point, so if they
-                               // haven't received the last send yet, it doesn't matter.
-                       },
-                       _ => panic!()
-               }
-       }
-       async fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) {
-               let peer_manager_ref = peer_manager.clone();
+       async fn schedule_read<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+                       CMH: ChannelMessageHandler + 'static,
+                       RMH: RoutingMessageHandler + 'static,
+                       L: Logger + 'static + ?Sized {
                // 8KB is nice and big but also should never cause any issues with stack overflowing.
                let mut buf = [0; 8192];
 
@@ -139,40 +139,26 @@ impl Connection {
                        // In this case, we do need to call peer_manager.socket_disconnected() to inform
                        // Rust-Lightning that the socket is gone.
                        PeerDisconnected
-               };
+               }
                let disconnect_type = loop {
-                       macro_rules! shutdown_socket {
-                               ($err: expr, $need_disconnect: expr) => { {
-                                       println!("Disconnecting peer due to {}!", $err);
-                                       break $need_disconnect;
-                               } }
-                       }
-
-                       macro_rules! prepare_read_write_call {
-                               () => { {
-                                       let mut us_lock = us.lock().unwrap();
-                                       if us_lock.rl_requested_disconnect {
-                                               shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
-                                       }
-                                       us_lock.block_disconnect_socket = true;
-                               } }
-                       }
-
-                       let read_paused = us.lock().unwrap().read_paused;
+                       let read_paused = {
+                               let us_lock = us.lock().unwrap();
+                               if us_lock.rl_requested_disconnect {
+                                       break Disconnect::CloseConnection;
+                               }
+                               us_lock.read_paused
+                       };
                        tokio::select! {
                                v = write_avail_receiver.recv() => {
                                        assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
-                                       prepare_read_write_call!();
-                                       if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
-                                               shutdown_socket!(e, Disconnect::CloseConnection);
+                                       if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+                                               break Disconnect::CloseConnection;
                                        }
-                                       us.lock().unwrap().block_disconnect_socket = false;
                                },
                                _ = read_wake_receiver.recv() => {},
                                read = reader.read(&mut buf), if !read_paused => match read {
-                                       Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected),
+                                       Ok(0) => break Disconnect::PeerDisconnected,
                                        Ok(len) => {
-                                               prepare_read_write_call!();
                                                let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
                                                let mut us_lock = us.lock().unwrap();
                                                match read_res {
@@ -180,15 +166,14 @@ impl Connection {
                                                                if pause_read {
                                                                        us_lock.read_paused = true;
                                                                }
-                                                               Self::event_trigger(&mut us_lock);
                                                        },
-                                                       Err(e) => shutdown_socket!(e, Disconnect::CloseConnection),
+                                                       Err(_) => break Disconnect::CloseConnection,
                                                }
-                                               us_lock.block_disconnect_socket = false;
                                        },
-                                       Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected),
+                                       Err(_) => break Disconnect::PeerDisconnected,
                                },
                        }
+                       peer_manager.process_events();
                };
                let writer_option = us.lock().unwrap().writer.take();
                if let Some(mut writer) = writer_option {
@@ -196,12 +181,12 @@ impl Connection {
                        let _ = writer.shutdown().await;
                }
                if let Disconnect::PeerDisconnected = disconnect_type {
-                       peer_manager_ref.socket_disconnected(&our_descriptor);
-                       Self::event_trigger(&mut us.lock().unwrap());
+                       peer_manager.socket_disconnected(&our_descriptor);
+                       peer_manager.process_events();
                }
        }
 
-       fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+       fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
                // We only ever need a channel of depth 1 here: if we returned a non-full write to the
                // PeerManager, we will eventually get notified that there is room in the socket to write
                // new bytes, which will generate an event. That event will be popped off the queue before
@@ -212,12 +197,13 @@ impl Connection {
                // we shove a value into the channel which comes after we've reset the read_paused bool to
                // false.
                let (read_waker, read_receiver) = mpsc::channel(1);
-               let (reader, writer) = io::split(stream);
+               stream.set_nonblocking(true).unwrap();
+               let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
 
                (reader, write_receiver, read_receiver,
                Arc::new(Mutex::new(Self {
-                       writer: Some(writer), event_notify, write_avail, read_waker, read_paused: false,
-                       block_disconnect_socket: false, rl_requested_disconnect: false,
+                       writer: Some(writer), write_avail, read_waker, read_paused: false,
+                       rl_requested_disconnect: false,
                        id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
                })))
        }
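
The `set_nonblocking(true)` call added above is load-bearing: tokio's `TcpStream::from_std` expects the socket to already be in non-blocking mode and does not switch it itself. A standalone sketch of the same conversion outside this crate:

```rust
use std::io;

// Sketch: converting a std TCP stream for tokio by hand, mirroring the call
// above. from_std() does not switch the socket to non-blocking mode itself,
// and (in recent tokio) must be called from within a runtime with IO enabled.
fn to_tokio(stream: std::net::TcpStream) -> io::Result<tokio::net::TcpStream> {
	stream.set_nonblocking(true)?;
	tokio::net::TcpStream::from_std(stream)
}
```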
@@ -229,10 +215,11 @@ impl Connection {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-///
-/// See the module-level documentation for how to handle the event_notify mpsc::Sender.
-pub fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) -> impl std::future::Future<Output=()> {
-       let (reader, write_receiver, read_receiver, us) = Connection::new(event_notify, stream);
+pub fn setup_inbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+               CMH: ChannelMessageHandler + 'static + Send + Sync,
+               RMH: RoutingMessageHandler + 'static + Send + Sync,
+               L: Logger + 'static + ?Sized + Send + Sync {
+       let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
 
@@ -268,10 +255,11 @@ pub fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<pee
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-///
-/// See the module-level documentation for how to handle the event_notify mpsc::Sender.
-pub fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) -> impl std::future::Future<Output=()> {
-       let (reader, mut write_receiver, read_receiver, us) = Connection::new(event_notify, stream);
+pub fn setup_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+               CMH: ChannelMessageHandler + 'static + Send + Sync,
+               RMH: RoutingMessageHandler + 'static + Send + Sync,
+               L: Logger + 'static + ?Sized + Send + Sync {
+       let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
 
@@ -337,11 +325,12 @@ pub fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<pe
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures happens via tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-///
-/// See the module-level documentation for how to handle the event_notify mpsc::Sender.
-pub async fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> {
-       if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), TcpStream::connect(&addr)).await {
-               Some(setup_outbound(peer_manager, event_notify, their_node_id, stream))
+pub async fn connect_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+               CMH: ChannelMessageHandler + 'static + Send + Sync,
+               RMH: RoutingMessageHandler + 'static + Send + Sync,
+               L: Logger + 'static + ?Sized + Send + Sync {
+       if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
+               Some(setup_outbound(peer_manager, their_node_id, stream))
        } else { None }
 }
 
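Call-site shape for the reworked `connect_outbound`, as a sketch (assuming the `PeerManager` alias and imports from the module docs): `None` means the TCP connection could not be established within the 10-second timeout; otherwise the returned future can simply be detached:

```rust
// Sketch: dial a peer and detach the connection-lifetime future.
async fn dial(peer_manager: PeerManager, their_node_id: PublicKey, addr: SocketAddr) {
	match lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await {
		// Completes once the peer disconnects; spawning it is enough, it
		// never has to be polled by hand.
		Some(connection_closed) => { tokio::spawn(connection_closed); },
		// TCP connect failed or timed out after the 10 seconds above.
		None => {},
	}
}
```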
@@ -362,7 +351,7 @@ fn wake_socket_waker(orig_ptr: *const ()) {
 }
 fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
        let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
-       let mut sender = unsafe { (*sender_ptr).clone() };
+       let sender = unsafe { (*sender_ptr).clone() };
        let _ = sender.try_send(());
 }
 fn drop_socket_waker(orig_ptr: *const ()) {
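
For context on the functions in this hunk: the crate wakes its write path by building a manual `RawWakerVTable` around a boxed `mpsc::Sender<()>`, since (per the `Connection` comment earlier) a const vtable cannot be built from templated functions. A rough sketch of that wiring, which may differ in detail from the surrounding file:

```rust
use std::task::{RawWaker, RawWakerVTable};

// Sketch: the vtable ties the clone/wake/wake-by-ref/drop functions together.
// `clone_socket_waker` is assumed here; only the wake/drop halves appear in
// this hunk.
const SOCKET_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
	clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);

// Hypothetical helper: box a clone of the sender so the *const () data pointer
// stays valid until drop_socket_waker reconstitutes and frees it.
fn sender_to_raw_waker(sender: &mpsc::Sender<()>) -> RawWaker {
	RawWaker::new(Box::into_raw(Box::new(sender.clone())) as *const (), &SOCKET_WAKER_VTABLE)
}
```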
@@ -441,18 +430,10 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
        }
 
        fn disconnect_socket(&mut self) {
-               {
-                       let mut us = self.conn.lock().unwrap();
-                       us.rl_requested_disconnect = true;
-                       us.read_paused = true;
-                       // Wake up the sending thread, assuming it is still alive
-                       let _ = us.write_avail.try_send(());
-                       // Happy-path return:
-                       if !us.block_disconnect_socket { return; }
-               }
-               while self.conn.lock().unwrap().block_disconnect_socket {
-                       thread::yield_now();
-               }
+               let mut us = self.conn.lock().unwrap();
+               us.rl_requested_disconnect = true;
+               // Wake up the sending thread, assuming it is still alive
+               let _ = us.write_avail.try_send(());
        }
 }
 impl Clone for SocketDescriptor {
@@ -486,6 +467,7 @@ mod tests {
        use tokio::sync::mpsc;
 
        use std::mem;
+       use std::sync::atomic::{AtomicBool, Ordering};
        use std::sync::{Arc, Mutex};
        use std::time::Duration;
 
@@ -500,6 +482,7 @@ mod tests {
                expected_pubkey: PublicKey,
                pubkey_connected: mpsc::Sender<()>,
                pubkey_disconnected: mpsc::Sender<()>,
+               disconnected_flag: AtomicBool,
                msg_events: Mutex<Vec<MessageSendEvent>>,
        }
        impl RoutingMessageHandler for MsgHandler {
@@ -509,7 +492,11 @@ mod tests {
                fn handle_htlc_fail_channel_update(&self, _update: &HTLCFailChannelUpdate) { }
                fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
                fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
-               fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { false }
+               fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
+               fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
+               fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
+               fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
+               fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
        }
        impl ChannelMessageHandler for MsgHandler {
                fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &OpenChannel) {}
@@ -517,7 +504,7 @@ mod tests {
                fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
                fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
                fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {}
-               fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {}
+               fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {}
                fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
                fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
                fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
@@ -527,8 +514,10 @@ mod tests {
                fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {}
                fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
                fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
+               fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
                fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
                        if *their_node_id == self.expected_pubkey {
+                               self.disconnected_flag.store(true, Ordering::SeqCst);
                                self.pubkey_disconnected.clone().try_send(()).unwrap();
                        }
                }
@@ -561,11 +550,12 @@ mod tests {
                        expected_pubkey: b_pub,
                        pubkey_connected: a_connected_sender,
                        pubkey_disconnected: a_disconnected_sender,
+                       disconnected_flag: AtomicBool::new(false),
                        msg_events: Mutex::new(Vec::new()),
                });
                let a_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::clone(&a_handler),
-                       route_handler: Arc::clone(&a_handler) as Arc<dyn RoutingMessageHandler>,
+                       route_handler: Arc::clone(&a_handler),
                }, a_key.clone(), &[1; 32], Arc::new(TestLogger())));
 
                let (b_connected_sender, mut b_connected) = mpsc::channel(1);
@@ -574,11 +564,12 @@ mod tests {
                        expected_pubkey: a_pub,
                        pubkey_connected: b_connected_sender,
                        pubkey_disconnected: b_disconnected_sender,
+                       disconnected_flag: AtomicBool::new(false),
                        msg_events: Mutex::new(Vec::new()),
                });
                let b_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::clone(&b_handler),
-                       route_handler: Arc::clone(&b_handler) as Arc<dyn RoutingMessageHandler>,
+                       route_handler: Arc::clone(&b_handler),
                }, b_key.clone(), &[2; 32], Arc::new(TestLogger())));
 
                // We bind on localhost, hoping the environment is properly configured with a local
@@ -593,9 +584,8 @@ mod tests {
                        (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
                } else { panic!("Failed to bind to v4 localhost on common ports"); };
 
-               let (sender, _receiver) = mpsc::channel(2);
-               let fut_a = super::setup_outbound(Arc::clone(&a_manager), sender.clone(), b_pub, tokio::net::TcpStream::from_std(conn_a).unwrap());
-               let fut_b = super::setup_inbound(b_manager, sender, tokio::net::TcpStream::from_std(conn_b).unwrap());
+               let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
+               let fut_b = super::setup_inbound(b_manager, conn_b);
 
                tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap();
                tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();
@@ -603,18 +593,20 @@ mod tests {
                a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError {
                        node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None }
                });
-               assert!(a_disconnected.try_recv().is_err());
-               assert!(b_disconnected.try_recv().is_err());
+               assert!(!a_handler.disconnected_flag.load(Ordering::SeqCst));
+               assert!(!b_handler.disconnected_flag.load(Ordering::SeqCst));
 
                a_manager.process_events();
                tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap();
                tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap();
+               assert!(a_handler.disconnected_flag.load(Ordering::SeqCst));
+               assert!(b_handler.disconnected_flag.load(Ordering::SeqCst));
 
                fut_a.await;
                fut_b.await;
        }
 
-       #[tokio::test(threaded_scheduler)]
+       #[tokio::test(flavor = "multi_thread")]
        async fn basic_threaded_connection_test() {
                do_basic_connection_test().await;
        }