X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-net-tokio%2Fsrc%2Flib.rs;h=2f0c96396bf92f69d2c9f88e4a1526c4990c96d7;hb=ecd283ea23760a8d24b7135228edd34f66999269;hp=37c9ddad76204ee68dda7656a81772bb97ef6fca;hpb=ba1349982ba28657c9e2d03a5b02c3ecc054b5cc;p=rust-lightning

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 37c9ddad..2f0c9639 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -36,12 +36,10 @@ use tokio::{io, time};
 use tokio::sync::mpsc;
 use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
-use lightning::chain::keysinterface::NodeSigner;
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
-use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
-use lightning::util::logger::Logger;
+use lightning::ln::peer_handler::APeerManager;
+use lightning::ln::msgs::NetAddress;
 
 use std::ops::Deref;
 use std::task;
@@ -80,53 +78,25 @@ struct Connection {
 	id: u64,
 }
 impl Connection {
-	async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH, NS>(
+	async fn poll_event_process<PM: Deref + 'static + Send + Sync>(
 		peer_manager: PM,
 		mut event_receiver: mpsc::Receiver<()>,
-	) where
-			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync,
-			CMH: Deref + 'static + Send + Sync,
-			RMH: Deref + 'static + Send + Sync,
-			OMH: Deref + 'static + Send + Sync,
-			L: Deref + 'static + Send + Sync,
-			UMH: Deref + 'static + Send + Sync,
-			NS: Deref + 'static + Send + Sync,
-			CMH::Target: ChannelMessageHandler + Send + Sync,
-			RMH::Target: RoutingMessageHandler + Send + Sync,
-			OMH::Target: OnionMessageHandler + Send + Sync,
-			L::Target: Logger + Send + Sync,
-			UMH::Target: CustomMessageHandler + Send + Sync,
-			NS::Target: NodeSigner + Send + Sync,
-	{
+	) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 		loop {
 			if event_receiver.recv().await.is_none() { return; }
-			peer_manager.process_events();
+			peer_manager.as_ref().process_events();
 		}
 	}
 
-	async fn schedule_read<PM, CMH, RMH, OMH, L, UMH, NS>(
+	async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
 		peer_manager: PM,
 		us: Arc<Mutex<Self>>,
 		mut reader: io::ReadHalf<TcpStream>,
 		mut read_wake_receiver: mpsc::Receiver<()>,
 		mut write_avail_receiver: mpsc::Receiver<()>,
-	) where
-			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-			CMH: Deref + 'static + Send + Sync,
-			RMH: Deref + 'static + Send + Sync,
-			OMH: Deref + 'static + Send + Sync,
-			L: Deref + 'static + Send + Sync,
-			UMH: Deref + 'static + Send + Sync,
-			NS: Deref + 'static + Send + Sync,
-			CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
-			RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
-			OMH::Target: OnionMessageHandler + 'static + Send + Sync,
-			L::Target: Logger + 'static + Send + Sync,
-			UMH::Target: CustomMessageHandler + 'static + Send + Sync,
-			NS::Target: NodeSigner + 'static + Send + Sync,
-	{
+	) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 		// Create a waker to wake up poll_event_process, above
 		let (event_waker, event_receiver) = mpsc::channel(1);
 		tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
@@ -160,7 +130,7 @@ impl Connection {
 			tokio::select! {
 				v = write_avail_receiver.recv() => {
 					assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
-					if peer_manager.write_buffer_space_avail(&mut our_descriptor).is_err() {
+					if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
 						break Disconnect::CloseConnection;
 					}
 				},
@@ -168,7 +138,7 @@ impl Connection {
 				read = reader.read(&mut buf), if !read_paused => match read {
 					Ok(0) => break Disconnect::PeerDisconnected,
 					Ok(len) => {
-						let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
+						let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
 						let mut us_lock = us.lock().unwrap();
 						match read_res {
 							Ok(pause_read) => {
@@ -189,7 +159,7 @@ impl Connection {
 			// our timeslice to another task we may just spin on this peer, starving other peers
 			// and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
 			// here.
-			tokio::task::yield_now().await;
+			let _ = tokio::task::yield_now().await;
 		};
 		let writer_option = us.lock().unwrap().writer.take();
 		if let Some(mut writer) = writer_option {
@@ -197,8 +167,8 @@ impl Connection {
 			let _ = writer.shutdown().await;
 		}
 		if let Disconnect::PeerDisconnected = disconnect_type {
-			peer_manager.socket_disconnected(&our_descriptor);
-			peer_manager.process_events();
+			peer_manager.as_ref().socket_disconnected(&our_descriptor);
+			peer_manager.as_ref().process_events();
 		}
 	}
 
@@ -245,30 +215,17 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
+pub fn setup_inbound<PM: Deref + 'static + Send + Sync + Clone>(
 	peer_manager: PM,
 	stream: StdTcpStream,
-) -> impl std::future::Future<Output=()> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-		CMH: Deref + 'static + Send + Sync,
-		RMH: Deref + 'static + Send + Sync,
-		OMH: Deref + 'static + Send + Sync,
-		L: Deref + 'static + Send + Sync,
-		UMH: Deref + 'static + Send + Sync,
-		NS: Deref + 'static + Send + Sync,
-		CMH::Target: ChannelMessageHandler + Send + Sync,
-		RMH::Target: RoutingMessageHandler + Send + Sync,
-		OMH::Target: OnionMessageHandler + Send + Sync,
-		L::Target: Logger + Send + Sync,
-		UMH::Target: CustomMessageHandler + Send + Sync,
-		NS::Target: NodeSigner + Send + Sync,
-{
+) -> impl std::future::Future<Output=()>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(test)]
 	let last_us = Arc::clone(&us);
 
-	let handle_opt = if peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
+	let handle_opt = if peer_manager.as_ref().new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
 		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
 	} else {
 		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -300,30 +257,17 @@ pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
+pub fn setup_outbound<PM: Deref + 'static + Send + Sync + Clone>(
 	peer_manager: PM,
 	their_node_id: PublicKey,
 	stream: StdTcpStream,
-) -> impl std::future::Future<Output=()> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-		CMH: Deref + 'static + Send + Sync,
-		RMH: Deref + 'static + Send + Sync,
-		OMH: Deref + 'static + Send + Sync,
-		L: Deref + 'static + Send + Sync,
-		UMH: Deref + 'static + Send + Sync,
-		NS: Deref + 'static + Send + Sync,
-		CMH::Target: ChannelMessageHandler + Send + Sync,
-		RMH::Target: RoutingMessageHandler + Send + Sync,
-		OMH::Target: OnionMessageHandler + Send + Sync,
-		L::Target: Logger + Send + Sync,
-		UMH::Target: CustomMessageHandler + Send + Sync,
-		NS::Target: NodeSigner + Send + Sync,
-{
+) -> impl std::future::Future<Output=()>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(test)]
 	let last_us = Arc::clone(&us);
 
-	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
+	let handle_opt = if let Ok(initial_send) = peer_manager.as_ref().new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(async move {
 			// We should essentially always have enough room in a TCP socket buffer to send the
 			// initial 10s of bytes. However, tokio running in single-threaded mode will always
@@ -342,7 +286,7 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 				},
 				_ => {
 					eprintln!("Failed to write first full message to socket!");
-					peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
+					peer_manager.as_ref().socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
 					break Err(());
 				}
 			}
@@ -385,25 +329,12 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
+pub async fn connect_outbound<PM: Deref + 'static + Send + Sync + Clone>(
 	peer_manager: PM,
 	their_node_id: PublicKey,
 	addr: SocketAddr,
-) -> Option<impl std::future::Future<Output=()>> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-		CMH: Deref + 'static + Send + Sync,
-		RMH: Deref + 'static + Send + Sync,
-		OMH: Deref + 'static + Send + Sync,
-		L: Deref + 'static + Send + Sync,
-		UMH: Deref + 'static + Send + Sync,
-		NS: Deref + 'static + Send + Sync,
-		CMH::Target: ChannelMessageHandler + Send + Sync,
-		RMH::Target: RoutingMessageHandler + Send + Sync,
-		OMH::Target: OnionMessageHandler + Send + Sync,
-		L::Target: Logger + Send + Sync,
-		UMH::Target: CustomMessageHandler + Send + Sync,
-		NS::Target: NodeSigner + Send + Sync,
-{
+) -> Option<impl std::future::Future<Output=()>>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
 		Some(setup_outbound(peer_manager, their_node_id, stream))
 	} else { None }
@@ -598,6 +529,17 @@ mod tests {
 		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
 		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
 		fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
+		fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &OpenChannelV2) {}
+		fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &AcceptChannelV2) {}
+		fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &TxAddInput) {}
+		fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &TxAddOutput) {}
+		fn handle_tx_remove_input(&self, _their_node_id: &PublicKey, _msg: &TxRemoveInput) {}
+		fn handle_tx_remove_output(&self, _their_node_id: &PublicKey, _msg: &TxRemoveOutput) {}
+		fn handle_tx_complete(&self, _their_node_id: &PublicKey, _msg: &TxComplete) {}
+		fn handle_tx_signatures(&self, _their_node_id: &PublicKey, _msg: &TxSignatures) {}
+		fn handle_tx_init_rbf(&self, _their_node_id: &PublicKey, _msg: &TxInitRbf) {}
+		fn handle_tx_ack_rbf(&self, _their_node_id: &PublicKey, _msg: &TxAckRbf) {}
+		fn handle_tx_abort(&self, _their_node_id: &PublicKey, _msg: &TxAbort) {}
 		fn peer_disconnected(&self, their_node_id: &PublicKey) {
 			if *their_node_id == self.expected_pubkey {
 				self.disconnected_flag.store(true, Ordering::SeqCst);
@@ -659,7 +601,8 @@ mod tests {
 			chan_handler: Arc::clone(&a_handler),
 			route_handler: Arc::clone(&a_handler),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
+			custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(a_key))));
 
 		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
 		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
@@ -674,7 +617,8 @@ mod tests {
 			chan_handler: Arc::clone(&b_handler),
 			route_handler: Arc::clone(&b_handler),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(b_key))));
+			custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(b_key))));
 
 		// We bind on localhost, hoping the environment is properly configured with a local
 		// address. This may not always be the case in containers and the like, so if this test is
@@ -727,7 +671,8 @@ mod tests {
 			chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
 			route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
+			custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(a_key))));
 
 		// Make two connections, one for an inbound and one for an outbound connection
 		let conn_a = {
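
Usage note: after this patch the entry points no longer enumerate the handler/logger type
parameters; any `Deref` whose target implements `APeerManager<Descriptor = SocketDescriptor>`
(e.g. an `Arc` around a concrete `PeerManager`) is accepted directly. A minimal caller-side
sketch of the new convention follows; `build_peer_manager()` and `their_node_id` are
hypothetical placeholders assumed to be provided by the caller, not part of this diff:

	// Hypothetical setup: assume `build_peer_manager()` returns a concrete PeerManager
	// (so `Arc<PeerManager<..>>` satisfies `PM: Deref + 'static + Send + Sync + Clone`
	// with `PM::Target: APeerManager<Descriptor = SocketDescriptor>`), and that
	// `their_node_id: PublicKey` identifies the peer being dialed.
	let peer_manager = Arc::new(build_peer_manager());
	let addr: std::net::SocketAddr = "127.0.0.1:9735".parse().unwrap();
	if let Some(connection_closed_fut) =
		lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), their_node_id, addr).await
	{
		// Polling this future is optional: all I/O processing is driven by tasks spawned
		// internally with tokio::spawn, so it only needs to be awaited if the caller wants
		// to observe disconnection.
		tokio::spawn(connection_closed_fut);
	}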