X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-net-tokio%2Fsrc%2Flib.rs;h=a513e603efe54d2654bdce1a59a74b59fa9e9b93;hb=4be56b93b043189f580bb62026b6d8b0da10fd4e;hp=45fe9b12f61b17a7b078805de1e6344efa91fbf3;hpb=50d12600b43c0c4a65547c6d37fca3efce0a12f2;p=rust-lightning

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 45fe9b12..a513e603 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -32,12 +32,13 @@
 //! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
 //! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
 //! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
-//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
+//! type NodeSigner = dyn lightning::chain::keysinterface::NodeSigner + Send + Sync;
+//! type UtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
 //! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
 //! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
 //! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
 //! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
-//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
+//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, UtxoLookup, Logger>>;
 //!
 //! // Connect to node with pubkey their_node_id at addr:
 //! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
@@ -80,6 +81,7 @@ use tokio::{io, time};
 use tokio::sync::mpsc;
 use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
+use lightning::chain::keysinterface::NodeSigner;
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::CustomMessageHandler;
@@ -123,21 +125,23 @@ struct Connection {
 	id: u64,
 }
 impl Connection {
-	async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH>(
+	async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH, NS>(
 		peer_manager: PM,
 		mut event_receiver: mpsc::Receiver<()>,
 	) where
-			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync,
+			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync,
 			CMH: Deref + 'static + Send + Sync,
 			RMH: Deref + 'static + Send + Sync,
 			OMH: Deref + 'static + Send + Sync,
 			L: Deref + 'static + Send + Sync,
 			UMH: Deref + 'static + Send + Sync,
+			NS: Deref + 'static + Send + Sync,
 			CMH::Target: ChannelMessageHandler + Send + Sync,
 			RMH::Target: RoutingMessageHandler + Send + Sync,
 			OMH::Target: OnionMessageHandler + Send + Sync,
 			L::Target: Logger + Send + Sync,
 			UMH::Target: CustomMessageHandler + Send + Sync,
+			NS::Target: NodeSigner + Send + Sync,
 	{
 		loop {
 			if event_receiver.recv().await.is_none() {
@@ -147,31 +151,34 @@ impl Connection {
 		}
 	}
 
-	async fn schedule_read<PM, CMH, RMH, OMH, L, UMH>(
+	async fn schedule_read<PM, CMH, RMH, OMH, L, UMH, NS>(
 		peer_manager: PM,
 		us: Arc<Mutex<Self>>,
 		mut reader: io::ReadHalf<TcpStream>,
 		mut read_wake_receiver: mpsc::Receiver<()>,
 		mut write_avail_receiver: mpsc::Receiver<()>,
 	) where
-			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
+			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
 			CMH: Deref + 'static + Send + Sync,
 			RMH: Deref + 'static + Send + Sync,
 			OMH: Deref + 'static + Send + Sync,
 			L: Deref + 'static + Send + Sync,
 			UMH: Deref + 'static + Send + Sync,
+			NS: Deref + 'static + Send + Sync,
 			CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
 			RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
 			OMH::Target: OnionMessageHandler + 'static + Send + Sync,
 			L::Target: Logger + 'static + Send + Sync,
 			UMH::Target: CustomMessageHandler + 'static + Send + Sync,
+			NS::Target: NodeSigner + 'static + Send + Sync,
 	{
 		// Create a waker to wake up poll_event_process, above
 		let (event_waker, event_receiver) = mpsc::channel(1);
 		tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
 
-		// 8KB is nice and big but also should never cause any issues with stack overflowing.
-		let mut buf = [0; 8192];
+		// 4KiB is nice and big without handling too many messages all at once, giving other peers
+		// a chance to do some work.
+		let mut buf = [0; 4096];
 
 		let mut our_descriptor = SocketDescriptor::new(us.clone());
 		// An enum describing why we did/are disconnecting:
@@ -198,7 +205,7 @@ impl Connection {
 			tokio::select! {
 				v = write_avail_receiver.recv() => {
 					assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
-					if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+					if peer_manager.write_buffer_space_avail(&mut our_descriptor).is_err() {
 						break Disconnect::CloseConnection;
 					}
 				},
@@ -283,28 +290,30 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
+pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 	peer_manager: PM,
 	stream: StdTcpStream,
 ) -> impl std::future::Future<Output=()> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
+		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
 		CMH: Deref + 'static + Send + Sync,
 		RMH: Deref + 'static + Send + Sync,
 		OMH: Deref + 'static + Send + Sync,
 		L: Deref + 'static + Send + Sync,
 		UMH: Deref + 'static + Send + Sync,
+		NS: Deref + 'static + Send + Sync,
 		CMH::Target: ChannelMessageHandler + Send + Sync,
 		RMH::Target: RoutingMessageHandler + Send + Sync,
 		OMH::Target: OnionMessageHandler + Send + Sync,
 		L::Target: Logger + Send + Sync,
 		UMH::Target: CustomMessageHandler + Send + Sync,
+		NS::Target: NodeSigner + Send + Sync,
 {
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
-	#[cfg(debug_assertions)]
+	#[cfg(test)]
 	let last_us = Arc::clone(&us);
 
-	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
+	let handle_opt = if peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
 		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
 	} else {
 		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -322,8 +331,8 @@ pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
 			// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
 			// keep too many wakers around, this makes sense. The race should be rare (we do
 			// some work after shutdown()) and an error would be a major memory leak.
-			#[cfg(debug_assertions)]
-			assert!(Arc::try_unwrap(last_us).is_ok());
+			#[cfg(test)]
+			debug_assert!(Arc::try_unwrap(last_us).is_ok());
 		}
 	}
 }
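
Usage note (sketch, not part of this diff): the new `NS` generic is inferred from the
`PeerManager` passed in, so an accept loop built on `setup_inbound` is unchanged apart
from the `PeerManager` type itself. A minimal sketch, assuming `peer_manager` is the
`Arc`'d `PeerManager` alias from the module docs above:

	let listener = tokio::net::TcpListener::bind("0.0.0.0:9735").await.unwrap();
	loop {
		let (tcp_stream, _addr) = listener.accept().await.unwrap();
		let peer_manager = Arc::clone(&peer_manager);
		tokio::spawn(async move {
			// setup_inbound takes a std TcpStream; the returned future resolves once the
			// peer disconnects, but polling it is optional since all processing futures
			// are tokio::spawn'd internally.
			lightning_net_tokio::setup_inbound(peer_manager, tcp_stream.into_std().unwrap()).await;
		});
	}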
@@ -336,26 +345,28 @@ pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
+pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 	peer_manager: PM,
 	their_node_id: PublicKey,
 	stream: StdTcpStream,
 ) -> impl std::future::Future<Output=()> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
+		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
 		CMH: Deref + 'static + Send + Sync,
 		RMH: Deref + 'static + Send + Sync,
 		OMH: Deref + 'static + Send + Sync,
 		L: Deref + 'static + Send + Sync,
 		UMH: Deref + 'static + Send + Sync,
+		NS: Deref + 'static + Send + Sync,
 		CMH::Target: ChannelMessageHandler + Send + Sync,
 		RMH::Target: RoutingMessageHandler + Send + Sync,
 		OMH::Target: OnionMessageHandler + Send + Sync,
 		L::Target: Logger + Send + Sync,
 		UMH::Target: CustomMessageHandler + Send + Sync,
+		NS::Target: NodeSigner + Send + Sync,
 {
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
-	#[cfg(debug_assertions)]
+	#[cfg(test)]
 	let last_us = Arc::clone(&us);
 	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(async move {
@@ -401,8 +412,8 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
 			// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
 			// keep too many wakers around, this makes sense. The race should be rare (we do
 			// some work after shutdown()) and an error would be a major memory leak.
-			#[cfg(debug_assertions)]
-			assert!(Arc::try_unwrap(last_us).is_ok());
+			#[cfg(test)]
+			debug_assert!(Arc::try_unwrap(last_us).is_ok());
 		}
 	}
 }
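
Usage note (sketch, not in the diff): `setup_outbound` takes a socket the caller has
already connected, so it also works with proxied or otherwise pre-established streams.
Here `peer_manager` and `their_node_id` are assumed to be in scope:

	let stream = std::net::TcpStream::connect("198.51.100.1:9735").unwrap();
	// The returned future completes on disconnect; spawning it is enough, as all the
	// real work happens in internally-spawned tasks.
	let disconnected = lightning_net_tokio::setup_outbound(
		Arc::clone(&peer_manager), their_node_id, stream);
	tokio::spawn(disconnected);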
@@ -419,22 +430,24 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH>(
+pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 	peer_manager: PM,
 	their_node_id: PublicKey,
 	addr: SocketAddr,
 ) -> Option<impl std::future::Future<Output=()>> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>> + 'static + Send + Sync + Clone,
+		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
 		CMH: Deref + 'static + Send + Sync,
 		RMH: Deref + 'static + Send + Sync,
 		OMH: Deref + 'static + Send + Sync,
 		L: Deref + 'static + Send + Sync,
 		UMH: Deref + 'static + Send + Sync,
+		NS: Deref + 'static + Send + Sync,
 		CMH::Target: ChannelMessageHandler + Send + Sync,
 		RMH::Target: RoutingMessageHandler + Send + Sync,
 		OMH::Target: OnionMessageHandler + Send + Sync,
 		L::Target: Logger + Send + Sync,
 		UMH::Target: CustomMessageHandler + Send + Sync,
+		NS::Target: NodeSigner + Send + Sync,
 {
 	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
 		Some(setup_outbound(peer_manager, their_node_id, stream))
@@ -572,7 +585,9 @@ mod tests {
 	use lightning::ln::msgs::*;
 	use lightning::ln::peer_handler::{MessageHandler, PeerManager};
 	use lightning::ln::features::NodeFeatures;
+	use lightning::routing::gossip::NodeId;
 	use lightning::util::events::*;
+	use lightning::util::test_utils::TestNodeSigner;
 	use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
 
 	use tokio::sync::mpsc;
@@ -601,14 +616,15 @@ mod tests {
 		fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
 		fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
 		fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
-		fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> { None }
-		fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
+		fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
+		fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
 		fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
 		fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
 		fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
 		fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
 		fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
 		fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
+		fn processing_queue_high(&self) -> bool { false }
 	}
 	impl ChannelMessageHandler for MsgHandler {
 		fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &OpenChannel) {}
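
Usage note (sketch, not in the diff): `connect_outbound` wraps `setup_outbound` in a TCP
connect with the 10-second timeout visible in the hunk above. `peer_manager`, `pubkey`,
and `addr` are assumed to be in scope:

	match lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), pubkey, addr).await {
		Some(connection_closed_fut) => {
			tokio::spawn(async move {
				connection_closed_fut.await;
				// The peer disconnected and all per-connection futures are now freed.
			});
		},
		None => eprintln!("Timed out or failed connecting to {}", addr),
	}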
@@ -627,13 +643,13 @@ mod tests {
 		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
 		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
 		fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
-		fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+		fn peer_disconnected(&self, their_node_id: &PublicKey) {
 			if *their_node_id == self.expected_pubkey {
 				self.disconnected_flag.store(true, Ordering::SeqCst);
 				self.pubkey_disconnected.clone().try_send(()).unwrap();
 			}
 		}
-		fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
+		fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> {
 			if *their_node_id == self.expected_pubkey {
 				self.pubkey_connected.clone().try_send(()).unwrap();
 			}
@@ -688,7 +704,7 @@ mod tests {
 			chan_handler: Arc::clone(&a_handler),
 			route_handler: Arc::clone(&a_handler),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, a_key.clone(), 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
 
 		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
 		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
@@ -703,7 +719,7 @@ mod tests {
 			chan_handler: Arc::clone(&b_handler),
 			route_handler: Arc::clone(&b_handler),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, b_key.clone(), 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+		}, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(b_key))));
 
 		// We bind on localhost, hoping the environment is properly configured with a local
 		// address. This may not always be the case in containers and the like, so if this test is
@@ -756,7 +772,7 @@ mod tests {
 			chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
 			route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, a_key, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
 
 		// Make two connections, one for an inbound and one for an outbound connection
 		let conn_a = {
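
For downstream callers, the API change visible in these test hunks is the new
`PeerManager::new` shape: the explicit node-secret argument is dropped and a `NodeSigner`
implementation is appended instead. A sketch under the same assumptions the tests make,
with `a_handler` and `a_key` in scope as above:

	let peer_manager = Arc::new(PeerManager::new(MessageHandler {
		chan_handler: Arc::clone(&a_handler),
		route_handler: Arc::clone(&a_handler),
		onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
	},
	0,         // a u32 current-time value; the tests simply pass 0
	&[1; 32],  // 32 bytes of entropy used to derive per-connection ephemeral keys
	Arc::new(TestLogger()),
	Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
	Arc::new(TestNodeSigner::new(a_key))));  // replaces the old node-secret argument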