//! # Example
//! ```
//! use std::net::TcpStream;
-//! use bitcoin::secp256k1::key::PublicKey;
+//! use bitcoin::secp256k1::PublicKey;
//! use lightning::util::events::{Event, EventHandler, EventsProvider};
//! use std::net::SocketAddr;
//! use std::sync::Arc;
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
use tokio::net::TcpStream;
use tokio::{io, time};
id: u64,
}
impl Connection {
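+ // Run process_events on a dedicated task, driven by wakeups from schedule_read, so
+ // that a long-running process_events call never blocks a peer's socket reads.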
+ async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
+ CMH: ChannelMessageHandler + 'static + Send + Sync,
+ RMH: RoutingMessageHandler + 'static + Send + Sync,
+ L: Logger + 'static + ?Sized + Send + Sync,
+ UMH: CustomMessageHandler + 'static + Send + Sync {
+ loop {
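+ // recv() returns None once every event_waker sender has been dropped, i.e. the
+ // connection is being torn down, at which point this task exits.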
+ if event_receiver.recv().await.is_none() {
+ return;
+ }
+ peer_manager.process_events();
+ }
+ }
+
async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
- CMH: ChannelMessageHandler + 'static,
- RMH: RoutingMessageHandler + 'static,
- L: Logger + 'static + ?Sized,
- UMH: CustomMessageHandler + 'static {
+ CMH: ChannelMessageHandler + 'static + Send + Sync,
+ RMH: RoutingMessageHandler + 'static + Send + Sync,
+ L: Logger + 'static + ?Sized + Send + Sync,
+ UMH: CustomMessageHandler + 'static + Send + Sync {
+ // Create a waker to wake up poll_event_process, above
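+ // A size-one channel suffices: if a wakeup is already queued, process_events will
+ // run again anyway, so any further sends can be safely dropped (see try_send below).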
+ let (event_waker, event_receiver) = mpsc::channel(1);
+ tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+
// 8KB is nice and big, yet small enough that it will never risk overflowing the stack.
let mut buf = [0; 8192];
Err(_) => break Disconnect::PeerDisconnected,
},
}
- peer_manager.process_events();
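+ // Nudge the event-processing task. If the channel is full a wakeup is already
+ // pending and process_events will run regardless, so the error is ignored.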
+ let _ = event_waker.try_send(());
+
+ // At this point we've processed a message or two, and reset the ping timer for this
+ // peer, at least in the "are we still receiving messages" context. If we don't give
+ // up our timeslice to another task we may just spin on this peer, starving other
+ // peers and eventually disconnecting them for ping timeouts. Instead, we explicitly
+ // yield here.
+ tokio::task::yield_now().await;
};
let writer_option = us.lock().unwrap().writer.take();
if let Some(mut writer) = writer_option {
// Pause reading, as we're now waiting on the remote end to ACK (in
// accordance with the send_data() docs).
us.read_paused = true;
+ // Further, to avoid any current pending read causing a `read_event` call, wake
+ // up the read_waker and restart its loop.
+ let _ = us.read_waker.try_send(());
return written_len;
},
}