// licenses.
//! A socket handling library for those running in Tokio environments who wish to use
-//! rust-lightning with native TcpStreams.
+//! rust-lightning with native [`TcpStream`]s.
//!
//! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
-//! TcpStream and a reference to a PeerManager and the rest is handled", except for the
-//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see example below.
+//! [`TcpStream`] and a reference to a [`PeerManager`] and the rest is handled".
//!
-//! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use
-//! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor.
+//! The [`PeerManager`], due to the fire-and-forget nature of this logic, must be a reference
+//! (e.g. an [`Arc`]) and must use the [`SocketDescriptor`] provided here as the [`PeerManager`]'s
+//! `SocketDescriptor` implementation.
//!
-//! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see
-//! their individual docs for details.
+//! Three methods are exposed to register a new connection for handling in [`tokio::spawn`] calls;
+//! see their individual docs for details.
//!
-//! # Example
-//! ```
-//! use std::net::TcpStream;
-//! use bitcoin::secp256k1::PublicKey;
-//! use lightning::util::events::{Event, EventHandler, EventsProvider};
-//! use std::net::SocketAddr;
-//! use std::sync::Arc;
-//!
-//! // Define concrete types for our high-level objects:
-//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
-//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
-//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
-//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
-//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
-//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
-//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
-//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
-//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
-//!
-//! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
-//! lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
-//! loop {
-//! let event_handler = |event: &Event| {
-//! // Handle the event!
-//! };
-//! channel_manager.await_persistable_update();
-//! channel_manager.process_pending_events(&event_handler);
-//! chain_monitor.process_pending_events(&event_handler);
-//! }
-//! }
-//!
-//! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
-//! lightning_net_tokio::setup_inbound(peer_manager, socket);
-//! loop {
-//! let event_handler = |event: &Event| {
-//! // Handle the event!
-//! };
-//! channel_manager.await_persistable_update();
-//! channel_manager.process_pending_events(&event_handler);
-//! chain_monitor.process_pending_events(&event_handler);
-//! }
-//! }
-//! ```
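+//! # Example
+//!
+//! A minimal sketch of connecting out to a peer; `my_peer_manager`, `their_node_id`, and
+//! `addr` are placeholders, with `my_peer_manager` assumed to be an [`Arc`]ed [`PeerManager`]
+//! parameterized by the [`SocketDescriptor`] defined in this crate:
+//!
+//! ```ignore
+//! if let Some(connection_closed) =
+//!     lightning_net_tokio::connect_outbound(Arc::clone(&my_peer_manager), their_node_id, addr).await
+//! {
+//!     // Awaiting this is optional: it only completes once the connection is fully torn
+//!     // down, and all I/O and event processing happens on internally-spawned tasks anyway.
+//!     connection_closed.await;
+//! }
+//! ```
+//!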
+//! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
-#![deny(missing_docs)]
+#![deny(private_intra_doc_links)]
+#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use bitcoin::secp256k1::PublicKey;
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
-use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
-use lightning::util::logger::Logger;
+use lightning::ln::peer_handler::APeerManager;
+use lightning::ln::msgs::NetAddress;
+use std::ops::Deref;
use std::task;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
id: u64,
}
impl Connection {
- async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
- CMH: ChannelMessageHandler + 'static,
- RMH: RoutingMessageHandler + 'static,
- L: Logger + 'static + ?Sized,
- UMH: CustomMessageHandler + 'static {
- // 8KB is nice and big but also should never cause any issues with stack overflowing.
- let mut buf = [0; 8192];
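+ // Spawned from `schedule_read`, below: sleeps until `event_waker` fires, then processes any
+ // pending `PeerManager` events, and exits once the sending side (held by the read task) has
+ // been dropped.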
+ async fn poll_event_process<PM: Deref + 'static + Send + Sync>(
+ peer_manager: PM,
+ mut event_receiver: mpsc::Receiver<()>,
+ ) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
+ loop {
+ if event_receiver.recv().await.is_none() {
+ return;
+ }
+ peer_manager.as_ref().process_events();
+ }
+ }
+
+ async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
+ peer_manager: PM,
+ us: Arc<Mutex<Self>>,
+ mut reader: io::ReadHalf<TcpStream>,
+ mut read_wake_receiver: mpsc::Receiver<()>,
+ mut write_avail_receiver: mpsc::Receiver<()>,
+ ) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
+ // Create a waker to wake up poll_event_process, above
+ let (event_waker, event_receiver) = mpsc::channel(1);
+ tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
+
+ // 4KiB is nice and big without handling too many messages all at once, giving other peers
+ // a chance to do some work.
+ let mut buf = [0; 4096];
let mut our_descriptor = SocketDescriptor::new(us.clone());
// An enum describing why we did/are disconnecting:
tokio::select! {
v = write_avail_receiver.recv() => {
assert!(v.is_some()); // We can't have dropped the sending end, it's in the `us` Arc!
- if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+ if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
break Disconnect::CloseConnection;
}
},
read = reader.read(&mut buf), if !read_paused => match read {
Ok(0) => break Disconnect::PeerDisconnected,
Ok(len) => {
- let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
+ let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
let mut us_lock = us.lock().unwrap();
match read_res {
Ok(pause_read) => {
Err(_) => break Disconnect::PeerDisconnected,
},
}
- peer_manager.process_events();
+ let _ = event_waker.try_send(());
+
+ // At this point we've processed a message or two and reset the ping timer for this
+ // peer, at least in the "are we still receiving messages" context. If we don't give up
+ // our timeslice to another task, we may just spin on this peer, starving other peers
+ // and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
+ // here.
+ let _ = tokio::task::yield_now().await;
};
let writer_option = us.lock().unwrap().writer.take();
if let Some(mut writer) = writer_option {
let _ = writer.shutdown().await;
}
if let Disconnect::PeerDisconnected = disconnect_type {
- peer_manager.socket_disconnected(&our_descriptor);
- peer_manager.process_events();
+ peer_manager.as_ref().socket_disconnected(&our_descriptor);
+ peer_manager.as_ref().process_events();
}
}
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
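+///
+/// # Example
+///
+/// A sketch of an accept loop, with `my_peer_manager` standing in for your `Arc`ed
+/// `PeerManager`:
+///
+/// ```ignore
+/// let listener = tokio::net::TcpListener::bind("0.0.0.0:9735").await.unwrap();
+/// loop {
+///     let (tokio_stream, _addr) = listener.accept().await.unwrap();
+///     // Spawning (or even dropping) the returned future is fine; the connection is driven
+///     // by the tasks it spawns internally.
+///     tokio::spawn(lightning_net_tokio::setup_inbound(
+///         Arc::clone(&my_peer_manager),
+///         tokio_stream.into_std().unwrap(),
+///     ));
+/// }
+/// ```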
-pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_inbound<PM: Deref + 'static + Send + Sync + Clone>(
+ peer_manager: PM,
+ stream: StdTcpStream,
+) -> impl std::future::Future<Output=()>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
let remote_addr = get_addr_from_stream(&stream);
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
- #[cfg(debug_assertions)]
+ #[cfg(test)]
let last_us = Arc::clone(&us);
- let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
+ let handle_opt = if peer_manager.as_ref().new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
} else {
// Note that we will skip socket_disconnected here, in accordance with the PeerManager
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
// keep too many wakers around, this makes sense. The race should be rare (we do
// some work after shutdown()) and an error would be a major memory leak.
- #[cfg(debug_assertions)]
- assert!(Arc::try_unwrap(last_us).is_ok());
+ #[cfg(test)]
+ debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
}
}
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
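+///
+/// # Example
+///
+/// A sketch, assuming an already-connected `std::net::TcpStream` in `std_stream` (all names
+/// here are placeholders):
+///
+/// ```ignore
+/// tokio::spawn(lightning_net_tokio::setup_outbound(
+///     Arc::clone(&my_peer_manager),
+///     their_node_id,
+///     std_stream,
+/// ));
+/// ```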
-pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+pub fn setup_outbound<PM: Deref + 'static + Send + Sync + Clone>(
+ peer_manager: PM,
+ their_node_id: PublicKey,
+ stream: StdTcpStream,
+) -> impl std::future::Future<Output=()>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
let remote_addr = get_addr_from_stream(&stream);
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
- #[cfg(debug_assertions)]
+ #[cfg(test)]
let last_us = Arc::clone(&us);
- let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
+ let handle_opt = if let Ok(initial_send) = peer_manager.as_ref().new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
Some(tokio::spawn(async move {
// We should essentially always have enough room in a TCP socket buffer to send the
// initial 10s of bytes. However, tokio running in single-threaded mode will always
},
_ => {
eprintln!("Failed to write first full message to socket!");
- peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
+ peer_manager.as_ref().socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
break Err(());
}
}
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
// keep too many wakers around, this makes sense. The race should be rare (we do
// some work after shutdown()) and an error would be a major memory leak.
- #[cfg(debug_assertions)]
- assert!(Arc::try_unwrap(last_us).is_ok());
+ #[cfg(test)]
+ debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
}
}
/// disconnected and associated handling futures are freed. However, because all processing in said
/// futures happens on tasks spawned with tokio::spawn, you do not need to poll the second future to
/// make progress.
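+///
+/// # Example
+///
+/// A sketch of handling the failure case; per the body below, `None` is returned if the TCP
+/// connection attempt fails or does not complete within ten seconds (names are placeholders):
+///
+/// ```ignore
+/// match lightning_net_tokio::connect_outbound(Arc::clone(&my_peer_manager), their_node_id, addr).await {
+///     Some(connection_closed) => { tokio::spawn(connection_closed); },
+///     None => eprintln!("Failed to connect to peer at {}", addr),
+/// }
+/// ```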
-pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
- CMH: ChannelMessageHandler + 'static + Send + Sync,
- RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync,
- UMH: CustomMessageHandler + 'static + Send + Sync {
+pub async fn connect_outbound<PM: Deref + 'static + Send + Sync + Clone>(
+ peer_manager: PM,
+ their_node_id: PublicKey,
+ addr: SocketAddr,
+) -> Option<impl std::future::Future<Output=()>>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
Some(setup_outbound(peer_manager, their_node_id, stream))
} else { None }
// pause read given we're now waiting on the remote end to ACK (and in
// accordance with the send_data() docs).
us.read_paused = true;
+ // Further, to avoid any current pending read causing a `read_event` call, wake
+ // up the read_waker and restart its loop.
+ let _ = us.read_waker.try_send(());
return written_len;
},
}
use lightning::ln::features::*;
use lightning::ln::msgs::*;
use lightning::ln::peer_handler::{MessageHandler, PeerManager};
- use lightning::util::events::*;
+ use lightning::routing::gossip::NodeId;
+ use lightning::events::*;
+ use lightning::util::test_utils::TestNodeSigner;
use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
use tokio::sync::mpsc;
fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
- fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
- fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
- fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
+ fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
+ fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
+ fn processing_queue_high(&self) -> bool { false }
}
impl ChannelMessageHandler for MsgHandler {
- fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &OpenChannel) {}
- fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &AcceptChannel) {}
+ fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &OpenChannel) {}
+ fn handle_accept_channel(&self, _their_node_id: &PublicKey, _msg: &AcceptChannel) {}
fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
- fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {}
- fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {}
+ fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {}
+ fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {}
fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {}
fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
- fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+ fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &OpenChannelV2) {}
+ fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &AcceptChannelV2) {}
+ fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &TxAddInput) {}
+ fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &TxAddOutput) {}
+ fn handle_tx_remove_input(&self, _their_node_id: &PublicKey, _msg: &TxRemoveInput) {}
+ fn handle_tx_remove_output(&self, _their_node_id: &PublicKey, _msg: &TxRemoveOutput) {}
+ fn handle_tx_complete(&self, _their_node_id: &PublicKey, _msg: &TxComplete) {}
+ fn handle_tx_signatures(&self, _their_node_id: &PublicKey, _msg: &TxSignatures) {}
+ fn handle_tx_init_rbf(&self, _their_node_id: &PublicKey, _msg: &TxInitRbf) {}
+ fn handle_tx_ack_rbf(&self, _their_node_id: &PublicKey, _msg: &TxAckRbf) {}
+ fn handle_tx_abort(&self, _their_node_id: &PublicKey, _msg: &TxAbort) {}
+ fn peer_disconnected(&self, their_node_id: &PublicKey) {
if *their_node_id == self.expected_pubkey {
self.disconnected_flag.store(true, Ordering::SeqCst);
self.pubkey_disconnected.clone().try_send(()).unwrap();
}
}
- fn peer_connected(&self, their_node_id: &PublicKey, _msg: &Init) {
+ fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> {
if *their_node_id == self.expected_pubkey {
self.pubkey_connected.clone().try_send(()).unwrap();
}
+ Ok(())
}
fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {}
fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {}
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
}
impl MessageSendEventsProvider for MsgHandler {
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let a_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::clone(&a_handler),
route_handler: Arc::clone(&a_handler),
- }, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+ onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+ custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+ }, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(a_key))));
let (b_connected_sender, mut b_connected) = mpsc::channel(1);
let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
let b_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::clone(&b_handler),
route_handler: Arc::clone(&b_handler),
- }, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+ onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+ custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+ }, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(b_key))));
// We bind on localhost, hoping the environment is properly configured with a local
// address. This may not always be the case in containers and the like, so if this test is
let a_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
+ onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
- }, a_key, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+ custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+ }, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(a_key))));
// Make two connections, one for an inbound and one for an outbound connection
let conn_a = {