//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
-//! type DataPersister = dyn lightning::chain::channelmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
+//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
//! lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
//! loop {
-//! channel_manager.await_persistable_update();
-//! channel_manager.process_pending_events(&|event| {
-//! // Handle the event!
-//! });
-//! chain_monitor.process_pending_events(&|event| {
+//! let event_handler = |event: &Event| {
//! // Handle the event!
-//! });
+//! };
+//! channel_manager.await_persistable_update();
+//! channel_manager.process_pending_events(&event_handler);
+//! chain_monitor.process_pending_events(&event_handler);
//! }
//! }
//!
//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
//! lightning_net_tokio::setup_inbound(peer_manager, socket);
//! loop {
-//! channel_manager.await_persistable_update();
-//! channel_manager.process_pending_events(&|event| {
-//! // Handle the event!
-//! });
-//! chain_monitor.process_pending_events(&|event| {
+//! let event_handler = |event: &Event| {
//! // Handle the event!
-//! });
+//! };
+//! channel_manager.await_persistable_update();
+//! channel_manager.process_pending_events(&event_handler);
+//! chain_monitor.process_pending_events(&event_handler);
//! }
//! }
//! ```
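//!
//! To accept inbound connections, you would typically drive the `accept_socket` helper above from
//! a `tokio::net::TcpListener`. A minimal sketch follows; the `listen` function name, the port,
//! and the unwrap-based error handling are illustrative assumptions, not part of this crate's API:
//! ```ignore
//! use tokio::net::TcpListener;
//!
//! async fn listen(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager) {
//!     // Bind on the conventional Lightning port; pick whatever suits your deployment.
//!     let listener = TcpListener::bind("0.0.0.0:9735").await.unwrap();
//!     loop {
//!         let (socket, _remote_addr) = listener.accept().await.unwrap();
//!         // One task per connection, each running the accept_socket event loop above.
//!         tokio::spawn(accept_socket(peer_manager.clone(), chain_monitor.clone(), channel_manager.clone(), socket));
//!     }
//! }
//! ```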
#![deny(broken_intra_doc_links)]
#![deny(missing_docs)]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+
use bitcoin::secp256k1::key::PublicKey;
use tokio::net::TcpStream;
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::peer_handler::CustomMessageHandler;
+use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
use lightning::util::logger::Logger;
-use std::{task, thread};
+use std::task;
+use std::net::IpAddr;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::{Arc, Mutex};
// socket. To wake it up (without otherwise changing its state), we can push a value into this
// Sender.
read_waker: mpsc::Sender<()>,
- // When we are told by rust-lightning to disconnect, we can't return to rust-lightning until we
- // are sure we won't call any more read/write PeerManager functions with the same connection.
- // This is set to true if we're in such a condition (with disconnect checked before with the
- // top-level mutex held) and false when we can return.
- block_disconnect_socket: bool,
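// Set to true when rust-lightning has asked us to pause reading from the socket, e.g. due to
// backpressure; while set, the read arm of the select loop below is disabled.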
read_paused: bool,
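// Set when rust-lightning calls disconnect_socket(); the read future checks this flag at the
// top of each loop iteration and winds the connection down once it is set.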
rl_requested_disconnect: bool,
id: u64,
}
impl Connection {
- async fn schedule_read<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+ async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
CMH: ChannelMessageHandler + 'static,
RMH: RoutingMessageHandler + 'static,
- L: Logger + 'static + ?Sized {
+ L: Logger + 'static + ?Sized,
+ UMH: CustomMessageHandler + 'static {
// 8KB is plenty of room for regular reads while being small enough to avoid any risk of stack overflow.
let mut buf = [0; 8192];
PeerDisconnected
}
let disconnect_type = loop {
- macro_rules! shutdown_socket {
- ($err: expr, $need_disconnect: expr) => { {
- println!("Disconnecting peer due to {}!", $err);
- break $need_disconnect;
- } }
- }
-
- macro_rules! prepare_read_write_call {
- () => { {
- let mut us_lock = us.lock().unwrap();
- if us_lock.rl_requested_disconnect {
- shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
- }
- us_lock.block_disconnect_socket = true;
- } }
- }
-
- let read_paused = us.lock().unwrap().read_paused;
+ let read_paused = {
+ let us_lock = us.lock().unwrap();
+ if us_lock.rl_requested_disconnect {
+ break Disconnect::CloseConnection;
+ }
+ us_lock.read_paused
+ };
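+ // Wait on three wakeup sources at once: write-buffer space becoming available again, an
+ // explicit read wakeup (which re-runs the flag checks above), or data arriving from the
+ // peer (only while reads are not paused).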
tokio::select! {
v = write_avail_receiver.recv() => {
assert!(v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
- prepare_read_write_call!();
- if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
- shutdown_socket!(e, Disconnect::CloseConnection);
+ if peer_manager.write_buffer_space_avail(&mut our_descriptor).is_err() {
+ break Disconnect::CloseConnection;
}
- us.lock().unwrap().block_disconnect_socket = false;
},
_ = read_wake_receiver.recv() => {},
read = reader.read(&mut buf), if !read_paused => match read {
- Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected),
+ Ok(0) => break Disconnect::PeerDisconnected,
Ok(len) => {
- prepare_read_write_call!();
let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
let mut us_lock = us.lock().unwrap();
match read_res {
us_lock.read_paused = true;
}
},
- Err(e) => shutdown_socket!(e, Disconnect::CloseConnection),
+ Err(_) => break Disconnect::CloseConnection,
}
- us_lock.block_disconnect_socket = false;
},
- Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected),
+ Err(_) => break Disconnect::PeerDisconnected,
},
}
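// Regardless of which arm ran, let the PeerManager process any events the handlers generated,
// which may include new messages to send to our peers.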
peer_manager.process_events();
(reader, write_receiver, read_receiver,
Arc::new(Mutex::new(Self {
writer: Some(writer), write_avail, read_waker, read_paused: false,
- block_disconnect_socket: false, rl_requested_disconnect: false,
+ rl_requested_disconnect: false,
id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
})))
}
/// The returned future will complete when the peer is disconnected and its associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
CMH: ChannelMessageHandler + 'static + Send + Sync,
RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync {
+ L: Logger + 'static + ?Sized + Send + Sync,
+ UMH: CustomMessageHandler + 'static + Send + Sync {
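+ // Look up the peer's socket address first so we can hand it to rust-lightning as a NetAddress
+ // when registering the new inbound connection below.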
+ let ip_addr = stream.peer_addr().unwrap();
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(debug_assertions)]
let last_us = Arc::clone(&us);
- let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
+ let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+ IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ }) {
Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
} else {
// Note that we will skip socket_disconnected here, in accordance with the PeerManager
/// The returned future will complete when the peer is disconnected and its associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
CMH: ChannelMessageHandler + 'static + Send + Sync,
RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync {
+ L: Logger + 'static + ?Sized + Send + Sync,
+ UMH: CustomMessageHandler + 'static + Send + Sync {
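+ // As in setup_inbound, fetch the peer's socket address so it can be reported as a NetAddress.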
+ let ip_addr = stream.peer_addr().unwrap();
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(debug_assertions)]
let last_us = Arc::clone(&us);
-
- let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
+ let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+ IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+ addr: ip.octets(),
+ port: ip_addr.port(),
+ }),
+ }) {
Some(tokio::spawn(async move {
// We should essentially always have enough room in a TCP socket buffer to send the
// initial 10s of bytes. However, tokio running in single-threaded mode will always
/// disconnected and its associated handling futures are freed. However, because said processing
/// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
/// make progress.
-pub async fn connect_outbound<CMH, RMH, L>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
CMH: ChannelMessageHandler + 'static + Send + Sync,
RMH: RoutingMessageHandler + 'static + Send + Sync,
- L: Logger + 'static + ?Sized + Send + Sync {
+ L: Logger + 'static + ?Sized + Send + Sync,
+ UMH: CustomMessageHandler + 'static + Send + Sync {
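// Note the nested Results: the outer one comes from the ten-second connection timeout, the
// inner one from the TCP connect itself.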
if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
Some(setup_outbound(peer_manager, their_node_id, stream))
} else { None }
}
fn disconnect_socket(&mut self) {
- {
- let mut us = self.conn.lock().unwrap();
- us.rl_requested_disconnect = true;
- us.read_paused = true;
- // Wake up the sending thread, assuming it is still alive
- let _ = us.write_avail.try_send(());
- // Happy-path return:
- if !us.block_disconnect_socket { return; }
- }
- while self.conn.lock().unwrap().block_disconnect_socket {
- thread::yield_now();
- }
+ let mut us = self.conn.lock().unwrap();
+ us.rl_requested_disconnect = true;
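+ // Unlike the previous implementation, we don't block here until the read future has quiesced;
+ // it will observe rl_requested_disconnect on its next wakeup and shut down on its own.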
+ // Wake up the sending thread, assuming it is still alive
+ let _ = us.write_avail.try_send(());
}
}
impl Clone for SocketDescriptor {
fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
- fn handle_htlc_fail_channel_update(&self, _update: &HTLCFailChannelUpdate) { }
fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
- fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
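// PeerManager::new now takes a CustomMessageHandler; IgnoringMessageHandler is a no-op
// implementation which is sufficient for these connect/disconnect tests.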
let a_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::clone(&a_handler),
route_handler: Arc::clone(&a_handler),
- }, a_key.clone(), &[1; 32], Arc::new(TestLogger())));
+ }, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
let (b_connected_sender, mut b_connected) = mpsc::channel(1);
let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
let b_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::clone(&b_handler),
route_handler: Arc::clone(&b_handler),
- }, b_key.clone(), &[2; 32], Arc::new(TestLogger())));
+ }, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
// We bind on localhost, hoping the environment is properly configured with a local
// address. This may not always be the case in containers and the like, so if this test is