X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-net-tokio%2Fsrc%2Flib.rs;h=cee7c5c1b982882244cdb33a5d82d7abb0c8eadc;hb=b1550524cfe2689e3743ddfc0a78527d26de8613;hp=6e2a0c22eef397b18433aefa7a575cd2084ecc73;hpb=38a544eafd51f0446b1bb460a42107d35c740388;p=rust-lightning diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 6e2a0c22..cee7c5c1 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -12,21 +12,19 @@ //! //! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a //! TcpStream and a reference to a PeerManager and the rest is handled", except for the -//! [Event](../lightning/util/events/enum.Event.html) handlng mechanism, see below. +//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see example below. //! //! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use //! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor. //! -//! Three methods are exposed to register a new connection for handling in tokio::spawn calls, see -//! their individual docs for more. All three take a -//! [mpsc::Sender<()>](../tokio/sync/mpsc/struct.Sender.html) which is sent into every time -//! something occurs which may result in lightning [Events](../lightning/util/events/enum.Event.html). -//! The call site should, thus, look something like this: +//! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see +//! their individual docs for details. +//! +//! # Example //! ``` -//! use tokio::sync::mpsc; //! use std::net::TcpStream; //! use bitcoin::secp256k1::key::PublicKey; -//! use lightning::util::events::EventsProvider; +//! use lightning::util::events::{Event, EventHandler, EventsProvider}; //! use std::net::SocketAddr; //! use std::sync::Arc; //! @@ -36,45 +34,43 @@ //! type Logger = dyn lightning::util::logger::Logger + Send + Sync; //! type ChainAccess = dyn lightning::chain::Access + Send + Sync; //! type ChainFilter = dyn lightning::chain::Filter + Send + Sync; -//! type DataPersister = dyn lightning::chain::channelmonitor::Persist + Send + Sync; +//! type DataPersister = dyn lightning::chain::chainmonitor::Persist + Send + Sync; //! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor, Arc, Arc, Arc, Arc>; //! type ChannelManager = Arc>; //! type PeerManager = Arc>; //! //! // Connect to node with pubkey their_node_id at addr: //! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) { -//! let (sender, mut receiver) = mpsc::channel(2); -//! lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await; -//! loop { -//! receiver.recv().await; -//! for _event in channel_manager.get_and_clear_pending_events().drain(..) { -//! // Handle the event! -//! } -//! for _event in chain_monitor.get_and_clear_pending_events().drain(..) { -//! // Handle the event! -//! } -//! } +//! lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await; +//! loop { +//! let event_handler = |event: &Event| { +//! // Handle the event! +//! }; +//! channel_manager.await_persistable_update(); +//! channel_manager.process_pending_events(&event_handler); +//! chain_monitor.process_pending_events(&event_handler); +//! } //! } //! //! // Begin reading from a newly accepted socket and talk to the peer: //! 
async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc, channel_manager: ChannelManager, socket: TcpStream) { -//! let (sender, mut receiver) = mpsc::channel(2); -//! lightning_net_tokio::setup_inbound(peer_manager, sender, socket); -//! loop { -//! receiver.recv().await; -//! for _event in channel_manager.get_and_clear_pending_events().drain(..) { -//! // Handle the event! -//! } -//! for _event in chain_monitor.get_and_clear_pending_events().drain(..) { -//! // Handle the event! -//! } -//! } +//! lightning_net_tokio::setup_inbound(peer_manager, socket); +//! loop { +//! let event_handler = |event: &Event| { +//! // Handle the event! +//! }; +//! channel_manager.await_persistable_update(); +//! channel_manager.process_pending_events(&event_handler); +//! chain_monitor.process_pending_events(&event_handler); +//! } //! } //! ``` #![deny(broken_intra_doc_links)] #![deny(missing_docs)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + use bitcoin::secp256k1::key::PublicKey; use tokio::net::TcpStream; @@ -84,13 +80,15 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt}; use lightning::ln::peer_handler; use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait; -use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; +use lightning::ln::peer_handler::CustomMessageHandler; +use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress}; use lightning::util::logger::Logger; -use std::{task, thread}; +use std::task; +use std::net::IpAddr; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::Duration; use std::hash::Hash; @@ -102,7 +100,6 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0); /// read future (which is returned by schedule_read). struct Connection { writer: Option>, - event_notify: mpsc::Sender<()>, // Because our PeerManager is templated by user-provided types, and we can't (as far as I can // tell) have a const RawWakerVTable built out of templated functions, we need some indirection // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail. @@ -119,31 +116,33 @@ struct Connection { // socket. To wake it up (without otherwise changing its state, we can push a value into this // Sender. read_waker: mpsc::Sender<()>, - // When we are told by rust-lightning to disconnect, we can't return to rust-lightning until we - // are sure we won't call any more read/write PeerManager functions with the same connection. - // This is set to true if we're in such a condition (with disconnect checked before with the - // top-level mutex held) and false when we can return. - block_disconnect_socket: bool, read_paused: bool, rl_requested_disconnect: bool, id: u64, } impl Connection { - fn event_trigger(us: &mut MutexGuard) { - match us.event_notify.try_send(()) { - Ok(_) => {}, - Err(mpsc::error::TrySendError::Full(_)) => { - // Ignore full errors as we just need the user to poll after this point, so if they - // haven't received the last send yet, it doesn't matter. 
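// Standalone sketch (not part of this patch) of the "depth-one wake-up channel" pattern that the
// removed event_trigger above relied on, and that the write_avail/read_waker senders in Connection
// still use: try_send a () and treat Full as success, since a () already sitting in the buffer
// means the consumer will wake up and handle everything pending anyway. All names below are
// illustrative only.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
	let (waker, mut wake_receiver) = mpsc::channel::<()>(1);
	// Producer side: fire-and-forget notification; duplicates coalesce in the single slot.
	match waker.try_send(()) {
		Ok(()) => {},
		Err(mpsc::error::TrySendError::Full(())) => {}, // a wake-up is already queued
		Err(mpsc::error::TrySendError::Closed(())) => {}, // the consumer went away
	}
	// Consumer side: one recv() per batch of pending work.
	if wake_receiver.recv().await.is_some() {
		// ...process everything that is currently pending...
	}
}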
-			},
-			_ => panic!()
+	async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
+			CMH: ChannelMessageHandler + 'static + Send + Sync,
+			RMH: RoutingMessageHandler + 'static + Send + Sync,
+			L: Logger + 'static + ?Sized + Send + Sync,
+			UMH: CustomMessageHandler + 'static + Send + Sync {
+		loop {
+			if event_receiver.recv().await.is_none() {
+				return;
+			}
+			peer_manager.process_events();
 		}
 	}
 
-	async fn schedule_read<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
-			CMH: ChannelMessageHandler + 'static,
-			RMH: RoutingMessageHandler + 'static,
-			L: Logger + 'static + ?Sized {
-		let peer_manager_ref = peer_manager.clone();
+
+	async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+			CMH: ChannelMessageHandler + 'static + Send + Sync,
+			RMH: RoutingMessageHandler + 'static + Send + Sync,
+			L: Logger + 'static + ?Sized + Send + Sync,
+			UMH: CustomMessageHandler + 'static + Send + Sync {
+		// Create a waker to wake up poll_event_process, above
+		let (event_waker, event_receiver) = mpsc::channel(1);
+		tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+
 		// 8KB is nice and big but also should never cause any issues with stack overflowing.
 		let mut buf = [0; 8192];
 
@@ -162,38 +161,24 @@ impl Connection {
 			PeerDisconnected
 		}
 		let disconnect_type = loop {
-			macro_rules! shutdown_socket {
-				($err: expr, $need_disconnect: expr) => { {
-						println!("Disconnecting peer due to {}!", $err);
-						break $need_disconnect;
-					} }
-			}
-
-			macro_rules! prepare_read_write_call {
-				() => { {
-					let mut us_lock = us.lock().unwrap();
-					if us_lock.rl_requested_disconnect {
-						shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
-					}
-					us_lock.block_disconnect_socket = true;
-				} }
-			}
-
-			let read_paused = us.lock().unwrap().read_paused;
+			let read_paused = {
+				let us_lock = us.lock().unwrap();
+				if us_lock.rl_requested_disconnect {
+					break Disconnect::CloseConnection;
+				}
+				us_lock.read_paused
+			};
 			tokio::select! {
 				v = write_avail_receiver.recv() => {
 					assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
- prepare_read_write_call!(); - if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) { - shutdown_socket!(e, Disconnect::CloseConnection); + if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) { + break Disconnect::CloseConnection; } - us.lock().unwrap().block_disconnect_socket = false; }, _ = read_wake_receiver.recv() => {}, read = reader.read(&mut buf), if !read_paused => match read { - Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected), + Ok(0) => break Disconnect::PeerDisconnected, Ok(len) => { - prepare_read_write_call!(); let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]); let mut us_lock = us.lock().unwrap(); match read_res { @@ -201,15 +186,14 @@ impl Connection { if pause_read { us_lock.read_paused = true; } - Self::event_trigger(&mut us_lock); }, - Err(e) => shutdown_socket!(e, Disconnect::CloseConnection), + Err(_) => break Disconnect::CloseConnection, } - us_lock.block_disconnect_socket = false; }, - Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected), + Err(_) => break Disconnect::PeerDisconnected, }, } + let _ = event_waker.try_send(()); }; let writer_option = us.lock().unwrap().writer.take(); if let Some(mut writer) = writer_option { @@ -217,12 +201,12 @@ impl Connection { let _ = writer.shutdown().await; } if let Disconnect::PeerDisconnected = disconnect_type { - peer_manager_ref.socket_disconnected(&our_descriptor); - Self::event_trigger(&mut us.lock().unwrap()); + peer_manager.socket_disconnected(&our_descriptor); + peer_manager.process_events(); } } - fn new(event_notify: mpsc::Sender<()>, stream: StdTcpStream) -> (io::ReadHalf, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc>) { + fn new(stream: StdTcpStream) -> (io::ReadHalf, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc>) { // We only ever need a channel of depth 1 here: if we returned a non-full write to the // PeerManager, we will eventually get notified that there is room in the socket to write // new bytes, which will generate an event. That event will be popped off the queue before @@ -238,8 +222,8 @@ impl Connection { (reader, write_receiver, read_receiver, Arc::new(Mutex::new(Self { - writer: Some(writer), event_notify, write_avail, read_waker, read_paused: false, - block_disconnect_socket: false, rl_requested_disconnect: false, + writer: Some(writer), write_avail, read_waker, read_paused: false, + rl_requested_disconnect: false, id: ID_COUNTER.fetch_add(1, Ordering::AcqRel) }))) } @@ -251,17 +235,26 @@ impl Connection { /// The returned future will complete when the peer is disconnected and associated handling /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do /// not need to poll the provided future in order to make progress. -/// -/// See the module-level documentation for how to handle the event_notify mpsc::Sender. 
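// Caller-side sketch (not part of this patch) of driving the new setup_inbound signature shown
// just below, i.e. without the old event_notify Sender. The `PeerManager` type is assumed to be
// the Arc alias from the module documentation; the listen address and function name are
// illustrative only.
async fn listen_for_peers(peer_manager: PeerManager) -> std::io::Result<()> {
	let listener = tokio::net::TcpListener::bind("0.0.0.0:9735").await?;
	loop {
		let (stream, _peer_addr) = listener.accept().await?;
		// setup_inbound takes a std TcpStream and returns a future that completes once the peer
		// disconnects; it does not need to be polled for the connection to make progress, but
		// spawning it keeps a handle on its completion.
		tokio::spawn(lightning_net_tokio::setup_inbound(peer_manager.clone(), stream.into_std()?));
	}
}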
-pub fn setup_inbound(peer_manager: Arc, Arc, Arc>>, event_notify: mpsc::Sender<()>, stream: StdTcpStream) -> impl std::future::Future where +pub fn setup_inbound(peer_manager: Arc, Arc, Arc, Arc>>, stream: StdTcpStream) -> impl std::future::Future where CMH: ChannelMessageHandler + 'static + Send + Sync, RMH: RoutingMessageHandler + 'static + Send + Sync, - L: Logger + 'static + ?Sized + Send + Sync { - let (reader, write_receiver, read_receiver, us) = Connection::new(event_notify, stream); + L: Logger + 'static + ?Sized + Send + Sync, + UMH: CustomMessageHandler + 'static + Send + Sync { + let ip_addr = stream.peer_addr().unwrap(); + let (reader, write_receiver, read_receiver, us) = Connection::new(stream); #[cfg(debug_assertions)] let last_us = Arc::clone(&us); - let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) { + let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() { + IpAddr::V4(ip) => Some(NetAddress::IPv4 { + addr: ip.octets(), + port: ip_addr.port(), + }), + IpAddr::V6(ip) => Some(NetAddress::IPv6 { + addr: ip.octets(), + port: ip_addr.port(), + }), + }) { Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver))) } else { // Note that we will skip socket_disconnected here, in accordance with the PeerManager @@ -293,17 +286,25 @@ pub fn setup_inbound(peer_manager: Arc(peer_manager: Arc, Arc, Arc>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future where +pub fn setup_outbound(peer_manager: Arc, Arc, Arc, Arc>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future where CMH: ChannelMessageHandler + 'static + Send + Sync, RMH: RoutingMessageHandler + 'static + Send + Sync, - L: Logger + 'static + ?Sized + Send + Sync { - let (reader, mut write_receiver, read_receiver, us) = Connection::new(event_notify, stream); + L: Logger + 'static + ?Sized + Send + Sync, + UMH: CustomMessageHandler + 'static + Send + Sync { + let ip_addr = stream.peer_addr().unwrap(); + let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream); #[cfg(debug_assertions)] let last_us = Arc::clone(&us); - - let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) { + let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() { + IpAddr::V4(ip) => Some(NetAddress::IPv4 { + addr: ip.octets(), + port: ip_addr.port(), + }), + IpAddr::V6(ip) => Some(NetAddress::IPv6 { + addr: ip.octets(), + port: ip_addr.port(), + }), + }) { Some(tokio::spawn(async move { // We should essentially always have enough room in a TCP socket buffer to send the // initial 10s of bytes. 
However, tokio running in single-threaded mode will always @@ -365,14 +366,13 @@ pub fn setup_outbound(peer_manager: Arc(peer_manager: Arc, Arc, Arc>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) -> Option> where +pub async fn connect_outbound(peer_manager: Arc, Arc, Arc, Arc>>, their_node_id: PublicKey, addr: SocketAddr) -> Option> where CMH: ChannelMessageHandler + 'static + Send + Sync, RMH: RoutingMessageHandler + 'static + Send + Sync, - L: Logger + 'static + ?Sized + Send + Sync { + L: Logger + 'static + ?Sized + Send + Sync, + UMH: CustomMessageHandler + 'static + Send + Sync { if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await { - Some(setup_outbound(peer_manager, event_notify, their_node_id, stream)) + Some(setup_outbound(peer_manager, their_node_id, stream)) } else { None } } @@ -472,18 +472,10 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { } fn disconnect_socket(&mut self) { - { - let mut us = self.conn.lock().unwrap(); - us.rl_requested_disconnect = true; - us.read_paused = true; - // Wake up the sending thread, assuming it is still alive - let _ = us.write_avail.try_send(()); - // Happy-path return: - if !us.block_disconnect_socket { return; } - } - while self.conn.lock().unwrap().block_disconnect_socket { - thread::yield_now(); - } + let mut us = self.conn.lock().unwrap(); + us.rl_requested_disconnect = true; + // Wake up the sending thread, assuming it is still alive + let _ = us.write_avail.try_send(()); } } impl Clone for SocketDescriptor { @@ -539,10 +531,9 @@ mod tests { fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result { Ok(false) } fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result { Ok(false) } fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result { Ok(false) } - fn handle_htlc_fail_channel_update(&self, _update: &HTLCFailChannelUpdate) { } fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { Vec::new() } fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec { Vec::new() } - fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &Init) { } + fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { } fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) } fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) } fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) } @@ -606,7 +597,7 @@ mod tests { let a_manager = Arc::new(PeerManager::new(MessageHandler { chan_handler: Arc::clone(&a_handler), route_handler: Arc::clone(&a_handler), - }, a_key.clone(), &[1; 32], Arc::new(TestLogger()))); + }, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); let (b_connected_sender, mut b_connected) = mpsc::channel(1); let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1); @@ -620,7 +611,7 @@ mod tests { let b_manager = Arc::new(PeerManager::new(MessageHandler { chan_handler: Arc::clone(&b_handler), route_handler: Arc::clone(&b_handler), - }, b_key.clone(), &[2; 32], Arc::new(TestLogger()))); + }, b_key.clone(), &[2; 32], 
Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); // We bind on localhost, hoping the environment is properly configured with a local // address. This may not always be the case in containers and the like, so if this test is @@ -634,9 +625,8 @@ mod tests { (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0) } else { panic!("Failed to bind to v4 localhost on common ports"); }; - let (sender, _receiver) = mpsc::channel(2); - let fut_a = super::setup_outbound(Arc::clone(&a_manager), sender.clone(), b_pub, conn_a); - let fut_b = super::setup_inbound(b_manager, sender, conn_b); + let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a); + let fut_b = super::setup_inbound(b_manager, conn_b); tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap(); tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap();
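// Caller-side sketch (not part of this patch) of the new connect_outbound flow the updated test
// above exercises. It assumes the `PeerManager` alias from the module documentation; the retry
// backoff and function name are illustrative only.
use std::net::SocketAddr;
use std::time::Duration;
use bitcoin::secp256k1::key::PublicKey;

async fn connect_with_retry(peer_manager: PeerManager, their_node_id: PublicKey, addr: SocketAddr) {
	loop {
		match lightning_net_tokio::connect_outbound(peer_manager.clone(), their_node_id, addr).await {
			Some(connection_closed) => {
				// connect_outbound already races the TCP connect against a 10 second timeout;
				// the future it returns completes once the peer has disconnected and all of the
				// connection's processing futures have been freed.
				connection_closed.await;
			},
			// None: the TCP connection could not be established in time. Back off and retry.
			None => tokio::time::sleep(Duration::from_secs(5)).await,
		}
	}
}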