[net-tokio] Call PeerManager::process_events without blocking reads
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 5f5fece0d2ed34e447e2905f9f3bd6d3f42ce102..cee7c5c1b982882244cdb33a5d82d7abb0c8eadc 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -34,7 +34,7 @@
 //! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
 //! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
 //! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
-//! type DataPersister = dyn lightning::chain::channelmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
+//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
 //! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
 //! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
 //! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
 //! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
 //!    lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
 //!    loop {
-//!            channel_manager.await_persistable_update();
-//!            channel_manager.process_pending_events(&|event| {
-//!                    // Handle the event!
-//!            });
-//!            chain_monitor.process_pending_events(&|event| {
+//!            let event_handler = |event: &Event| {
 //!                    // Handle the event!
-//!            });
+//!            };
+//!            channel_manager.await_persistable_update();
+//!            channel_manager.process_pending_events(&event_handler);
+//!            chain_monitor.process_pending_events(&event_handler);
 //!    }
 //! }
 //!
 //! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
 //!    lightning_net_tokio::setup_inbound(peer_manager, socket);
 //!    loop {
-//!            channel_manager.await_persistable_update();
-//!            channel_manager.process_pending_events(&|event| {
-//!                    // Handle the event!
-//!            });
-//!            chain_monitor.process_pending_events(&|event| {
+//!            let event_handler = |event: &Event| {
 //!                    // Handle the event!
-//!            });
+//!            };
+//!            channel_manager.await_persistable_update();
+//!            channel_manager.process_pending_events(&event_handler);
+//!            chain_monitor.process_pending_events(&event_handler);
 //!    }
 //! }
 //! ```
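The docs change above is purely a refactor: instead of repeating an identical anonymous closure for the ChannelManager and the ChainMonitor, the example binds a single `event_handler` and hands it to both `process_pending_events` calls by reference. A fleshed-out sketch of what such a handler might look like inside that loop (assuming `lightning::util::events::Event` is in scope; the variants shown are illustrative, not an exhaustive or prescribed set):

```rust
// Drop-in replacement for the `event_handler` in the docs' loop above.
// A sketch only: real nodes must handle every variant their usage produces.
let event_handler = |event: &Event| match event {
    Event::FundingGenerationReady { .. } => {
        // Build the funding transaction and hand it back to the
        // ChannelManager.
    },
    Event::PendingHTLCsForwardable { .. } => {
        // Schedule a call to channel_manager.process_pending_htlc_forwards().
    },
    _ => { /* Handle the remaining variants as appropriate. */ },
};
channel_manager.process_pending_events(&event_handler);
chain_monitor.process_pending_events(&event_handler);
```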
@@ -71,6 +69,8 @@
 #![deny(broken_intra_doc_links)]
 #![deny(missing_docs)]
 
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+
 use bitcoin::secp256k1::key::PublicKey;
 
 use tokio::net::TcpStream;
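The new `doc_auto_cfg` attribute makes rustdoc annotate feature-gated items automatically, but it only takes effect on nightly with `--cfg docsrs` set. The conventional way to arrange that on docs.rs is a metadata stanza along these lines (a sketch of the usual convention, not necessarily this crate's exact Cargo.toml):

```toml
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
```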
@@ -80,10 +80,12 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::peer_handler::CustomMessageHandler;
+use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
 use lightning::util::logger::Logger;
 
 use std::task;
+use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
@@ -119,10 +121,28 @@ struct Connection {
        id: u64,
 }
 impl Connection {
-       async fn schedule_read<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
-                       CMH: ChannelMessageHandler + 'static,
-                       RMH: RoutingMessageHandler + 'static,
-                       L: Logger + 'static + ?Sized {
+       async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
+                       CMH: ChannelMessageHandler + 'static + Send + Sync,
+                       RMH: RoutingMessageHandler + 'static + Send + Sync,
+                       L: Logger + 'static + ?Sized + Send + Sync,
+                       UMH: CustomMessageHandler + 'static + Send + Sync {
+               loop {
+                       if event_receiver.recv().await.is_none() {
+                               return;
+                       }
+                       peer_manager.process_events();
+               }
+       }
+
+       async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+                       CMH: ChannelMessageHandler + 'static + Send + Sync,
+                       RMH: RoutingMessageHandler + 'static + Send + Sync,
+                       L: Logger + 'static + ?Sized + Send + Sync,
+                       UMH: CustomMessageHandler + 'static + Send + Sync {
+               // Create a waker to wake up poll_event_process, above
+               let (event_waker, event_receiver) = mpsc::channel(1);
+               tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+
                // 8KB is nice and big, and should never cause any stack-overflow issues.
                let mut buf = [0; 8192];
 
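This is the heart of the patch: `process_events` can take locks and do substantial work, so calling it inline from the read loop stalls the socket. Instead, `schedule_read` spawns `poll_event_process` and pokes it through a bounded channel of capacity one. Stripped of the lightning types, the pattern looks like this (a self-contained sketch using only tokio, where the hypothetical `do_heavy_work` stands in for `PeerManager::process_events`):

```rust
use tokio::sync::mpsc;

// Runs until every sender is dropped, mirroring poll_event_process above.
async fn worker(mut wake_receiver: mpsc::Receiver<()>) {
    while wake_receiver.recv().await.is_some() {
        // Potentially slow, lock-holding work that must not run on the
        // read path.
        do_heavy_work();
    }
}

fn do_heavy_work() { /* stand-in for PeerManager::process_events() */ }

#[tokio::main]
async fn main() {
    // Capacity one coalesces wakeups: if a notification is already queued,
    // further try_send() calls fail harmlessly, yet the worker is still
    // guaranteed to run at least once more.
    let (waker, receiver) = mpsc::channel(1);
    let worker_handle = tokio::spawn(worker(receiver));
    for _ in 0..100 {
        // On the hot path: never blocks, never allocates.
        let _ = waker.try_send(());
    }
    drop(waker); // Closing the channel lets the worker loop exit.
    worker_handle.await.unwrap();
}
```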
@@ -141,30 +161,23 @@ impl Connection {
                        PeerDisconnected
                }
                let disconnect_type = loop {
-                       macro_rules! shutdown_socket {
-                               ($err: expr, $need_disconnect: expr) => { {
-                                       println!("Disconnecting peer due to {}!", $err);
-                                       break $need_disconnect;
-                               } }
-                       }
-
                        let read_paused = {
                                let us_lock = us.lock().unwrap();
                                if us_lock.rl_requested_disconnect {
-                                       shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
+                                       break Disconnect::CloseConnection;
                                }
                                us_lock.read_paused
                        };
                        tokio::select! {
                                v = write_avail_receiver.recv() => {
                                        assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
-                                       if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
-                                               shutdown_socket!(e, Disconnect::CloseConnection);
+                                       if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+                                               break Disconnect::CloseConnection;
                                        }
                                },
                                _ = read_wake_receiver.recv() => {},
                                read = reader.read(&mut buf), if !read_paused => match read {
-                                       Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected),
+                                       Ok(0) => break Disconnect::PeerDisconnected,
                                        Ok(len) => {
                                                let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
                                                let mut us_lock = us.lock().unwrap();
@@ -174,13 +187,13 @@ impl Connection {
                                                                        us_lock.read_paused = true;
                                                                }
                                                        },
-                                                       Err(e) => shutdown_socket!(e, Disconnect::CloseConnection),
+                                                       Err(_) => break Disconnect::CloseConnection,
                                                }
                                        },
-                                       Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected),
+                                       Err(_) => break Disconnect::PeerDisconnected,
                                },
                        }
-                       peer_manager.process_events();
+                       let _ = event_waker.try_send(());
                };
                let writer_option = us.lock().unwrap().writer.take();
                if let Some(mut writer) = writer_option {
@@ -222,15 +235,26 @@ impl Connection {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
 /// do not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
                CMH: ChannelMessageHandler + 'static + Send + Sync,
                RMH: RoutingMessageHandler + 'static + Send + Sync,
-               L: Logger + 'static + ?Sized + Send + Sync {
+               L: Logger + 'static + ?Sized + Send + Sync,
+               UMH: CustomMessageHandler + 'static + Send + Sync {
+       let ip_addr = stream.peer_addr().unwrap();
        let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
 
-       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
+       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+       }) {
                Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
        } else {
                // Note that we will skip socket_disconnected here, in accordance with the PeerManager
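Both `setup_inbound` and `setup_outbound` now pass the peer's socket address to the `PeerManager`, converted into the wire-level `lightning::ln::msgs::NetAddress` type. Two small observations: `stream.peer_addr().unwrap()` will panic in the unlikely event the peer has already gone away, and the binding named `ip_addr` actually holds a full `SocketAddr`. Since the conversion match is duplicated verbatim in `setup_outbound` below, a hypothetical helper (not part of this patch) could capture it once:

```rust
use std::net::{IpAddr, SocketAddr};
use lightning::ln::msgs::NetAddress;

// Hypothetical refactor of the duplicated match in setup_inbound and
// setup_outbound: map a SocketAddr onto the lightning wire representation.
fn net_address_from_socket_addr(addr: SocketAddr) -> NetAddress {
    match addr.ip() {
        IpAddr::V4(ip) => NetAddress::IPv4 { addr: ip.octets(), port: addr.port() },
        IpAddr::V6(ip) => NetAddress::IPv6 { addr: ip.octets(), port: addr.port() },
    }
}
```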
@@ -262,15 +286,25 @@ pub fn setup_inbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<So
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
 /// do not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
                CMH: ChannelMessageHandler + 'static + Send + Sync,
                RMH: RoutingMessageHandler + 'static + Send + Sync,
-               L: Logger + 'static + ?Sized + Send + Sync {
+               L: Logger + 'static + ?Sized + Send + Sync,
+               UMH: CustomMessageHandler + 'static + Send + Sync {
+       let ip_addr = stream.peer_addr().unwrap();
        let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
-
-       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
+       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+       }) {
                Some(tokio::spawn(async move {
                        // We should essentially always have enough room in a TCP socket buffer to send the
                        // initial 10s of bytes. However, tokio running in single-threaded mode will always
@@ -332,10 +366,11 @@ pub fn setup_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<S
 /// disconnected and associated handling futures are freed. However, because all processing in
 /// said futures is spawned with tokio::spawn, you do not need to poll the second future in order
 /// to make progress.
-pub async fn connect_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
                CMH: ChannelMessageHandler + 'static + Send + Sync,
                RMH: RoutingMessageHandler + 'static + Send + Sync,
-               L: Logger + 'static + ?Sized + Send + Sync {
+               L: Logger + 'static + ?Sized + Send + Sync,
+               UMH: CustomMessageHandler + 'static + Send + Sync {
        if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
                Some(setup_outbound(peer_manager, their_node_id, stream))
        } else { None }
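`connect_outbound` wraps the TCP connect in a ten-second `tokio::time::timeout` and returns `None` if either the timeout fires or the connect fails. A sketch of a hypothetical caller, reusing the `PeerManager` type alias from the crate docs at the top of the file:

```rust
// Hypothetical driver for connect_outbound; PeerManager is the Arc-wrapped
// alias from the module docs, and the peer details are assumed given.
async fn try_connect(peer_manager: PeerManager, their_node_id: PublicKey, addr: SocketAddr) {
    match lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await {
        Some(connection_closed_fut) => {
            // All I/O is driven by internally spawned tasks; awaiting this
            // future only tells us when the peer disconnects.
            tokio::spawn(connection_closed_fut);
        },
        None => {
            // TCP connect failed or the 10-second timeout fired.
        },
    }
}
```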
@@ -496,10 +531,9 @@ mod tests {
                fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
                fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
                fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
-               fn handle_htlc_fail_channel_update(&self, _update: &HTLCFailChannelUpdate) { }
                fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
                fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
-               fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
+               fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
                fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
                fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
                fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -563,7 +597,7 @@ mod tests {
                let a_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::clone(&a_handler),
                        route_handler: Arc::clone(&a_handler),
-               }, a_key.clone(), &[1; 32], Arc::new(TestLogger())));
+               }, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
 
                let (b_connected_sender, mut b_connected) = mpsc::channel(1);
                let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
@@ -577,7 +611,7 @@ mod tests {
                let b_manager = Arc::new(PeerManager::new(MessageHandler {
                        chan_handler: Arc::clone(&b_handler),
                        route_handler: Arc::clone(&b_handler),
-               }, b_key.clone(), &[2; 32], Arc::new(TestLogger())));
+               }, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
 
                // We bind on localhost, hoping the environment is properly configured with a local
                // address. This may not always be the case in containers and the like, so if this test is
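For downstream code, the visible API change is the new fifth type parameter on `PeerManager` and the matching fifth argument to `PeerManager::new`. Nodes with no custom wire messages can plug in `lightning::ln::peer_handler::IgnoringMessageHandler`, exactly as the updated tests above do. A sketch of an upgraded call site, with the other four arguments assumed already in hand:

```rust
// Hypothetical upgrade of an existing PeerManager::new call site; only the
// final IgnoringMessageHandler argument is new in this patch.
let peer_manager = Arc::new(PeerManager::new(
    message_handler,   // MessageHandler { chan_handler, route_handler }
    node_secret,       // SecretKey used in the noise handshake
    &ephemeral_bytes,  // &[u8; 32] of fresh randomness
    logger,            // Arc<impl Logger>
    Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
));
```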