X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;ds=sidebyside;f=lightning-net-tokio%2Fsrc%2Flib.rs;h=6e2a0c22eef397b18433aefa7a575cd2084ecc73;hb=refs%2Fheads%2F2021-05-no-control-chars;hp=08aa12571985a985f896929e2034a248ff77ca1a;hpb=5ada94046456909c07b135dca9ceac41963afdf4;p=rust-lightning diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 08aa1257..6e2a0c22 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -1,3 +1,12 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + //! A socket handling library for those running in Tokio environments who wish to use //! rust-lightning with native TcpStreams. //! @@ -15,21 +24,25 @@ //! The call site should, thus, look something like this: //! ``` //! use tokio::sync::mpsc; -//! use tokio::net::TcpStream; -//! use secp256k1::key::PublicKey; +//! use std::net::TcpStream; +//! use bitcoin::secp256k1::key::PublicKey; //! use lightning::util::events::EventsProvider; //! use std::net::SocketAddr; //! use std::sync::Arc; //! //! // Define concrete types for our high-level objects: -//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface; -//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator; -//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor, Arc>; -//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager; -//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager; +//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync; +//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync; +//! type Logger = dyn lightning::util::logger::Logger + Send + Sync; +//! type ChainAccess = dyn lightning::chain::Access + Send + Sync; +//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync; +//! type DataPersister = dyn lightning::chain::channelmonitor::Persist + Send + Sync; +//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor, Arc, Arc, Arc, Arc>; +//! type ChannelManager = Arc>; +//! type PeerManager = Arc>; //! //! // Connect to node with pubkey their_node_id at addr: -//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) { +//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) { //! let (sender, mut receiver) = mpsc::channel(2); //! lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await; //! loop { @@ -37,14 +50,14 @@ //! for _event in channel_manager.get_and_clear_pending_events().drain(..) { //! // Handle the event! //! } -//! for _event in channel_monitor.get_and_clear_pending_events().drain(..) { +//! for _event in chain_monitor.get_and_clear_pending_events().drain(..) { //! // Handle the event! //! } //! } //! } //! //! // Begin reading from a newly accepted socket and talk to the peer: -//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc, channel_manager: ChannelManager, socket: TcpStream) { +//! 
async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc, channel_manager: ChannelManager, socket: TcpStream) { //! let (sender, mut receiver) = mpsc::channel(2); //! lightning_net_tokio::setup_inbound(peer_manager, sender, socket); //! loop { @@ -52,14 +65,17 @@ //! for _event in channel_manager.get_and_clear_pending_events().drain(..) { //! // Handle the event! //! } -//! for _event in channel_monitor.get_and_clear_pending_events().drain(..) { +//! for _event in chain_monitor.get_and_clear_pending_events().drain(..) { //! // Handle the event! //! } //! } //! } //! ``` -use secp256k1::key::PublicKey; +#![deny(broken_intra_doc_links)] +#![deny(missing_docs)] + +use bitcoin::secp256k1::key::PublicKey; use tokio::net::TcpStream; use tokio::{io, time}; @@ -68,10 +84,12 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt}; use lightning::ln::peer_handler; use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait; -use lightning::ln::msgs::ChannelMessageHandler; +use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; +use lightning::util::logger::Logger; -use std::task; +use std::{task, thread}; use std::net::SocketAddr; +use std::net::TcpStream as StdTcpStream; use std::sync::{Arc, Mutex, MutexGuard}; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::Duration; @@ -87,7 +105,7 @@ struct Connection { event_notify: mpsc::Sender<()>, // Because our PeerManager is templated by user-provided types, and we can't (as far as I can // tell) have a const RawWakerVTable built out of templated functions, we need some indirection - // between being woken up with write-ready and calling PeerManager::write_buffer_spce_avail. + // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail. // This provides that indirection, with a Sender which gets handed to the PeerManager Arc on // the schedule_read stack. // @@ -101,6 +119,11 @@ struct Connection { // socket. To wake it up (without otherwise changing its state, we can push a value into this // Sender. read_waker: mpsc::Sender<()>, + // When we are told by rust-lightning to disconnect, we can't return to rust-lightning until we + // are sure we won't call any more read/write PeerManager functions with the same connection. + // This is set to true if we're in such a condition (with disconnect checked before with the + // top-level mutex held) and false when we can return. + block_disconnect_socket: bool, read_paused: bool, rl_requested_disconnect: bool, id: u64, @@ -116,7 +139,10 @@ impl Connection { _ => panic!() } } - async fn schedule_read(peer_manager: Arc>>, us: Arc>, mut reader: io::ReadHalf, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) { + async fn schedule_read(peer_manager: Arc, Arc, Arc>>, us: Arc>, mut reader: io::ReadHalf, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where + CMH: ChannelMessageHandler + 'static, + RMH: RoutingMessageHandler + 'static, + L: Logger + 'static + ?Sized { let peer_manager_ref = peer_manager.clone(); // 8KB is nice and big but also should never cause any issues with stack overflowing. let mut buf = [0; 8192]; @@ -134,7 +160,7 @@ impl Connection { // In this case, we do need to call peer_manager.socket_disconnected() to inform // Rust-Lightning that the socket is gone. PeerDisconnected - }; + } let disconnect_type = loop { macro_rules! 
shutdown_socket { ($err: expr, $need_disconnect: expr) => { { @@ -143,28 +169,35 @@ impl Connection { } } } + macro_rules! prepare_read_write_call { + () => { { + let mut us_lock = us.lock().unwrap(); + if us_lock.rl_requested_disconnect { + shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection); + } + us_lock.block_disconnect_socket = true; + } } + } + let read_paused = us.lock().unwrap().read_paused; tokio::select! { v = write_avail_receiver.recv() => { assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc! - if us.lock().unwrap().rl_requested_disconnect { - shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection); - } + prepare_read_write_call!(); if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) { shutdown_socket!(e, Disconnect::CloseConnection); } + us.lock().unwrap().block_disconnect_socket = false; }, _ = read_wake_receiver.recv() => {}, read = reader.read(&mut buf), if !read_paused => match read { Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected), Ok(len) => { - if us.lock().unwrap().rl_requested_disconnect { - shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection); - } + prepare_read_write_call!(); let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]); + let mut us_lock = us.lock().unwrap(); match read_res { Ok(pause_read) => { - let mut us_lock = us.lock().unwrap(); if pause_read { us_lock.read_paused = true; } @@ -172,6 +205,7 @@ impl Connection { }, Err(e) => shutdown_socket!(e, Disconnect::CloseConnection), } + us_lock.block_disconnect_socket = false; }, Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected), }, @@ -188,7 +222,7 @@ impl Connection { } } - fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (io::ReadHalf, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc>) { + fn new(event_notify: mpsc::Sender<()>, stream: StdTcpStream) -> (io::ReadHalf, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc>) { // We only ever need a channel of depth 1 here: if we returned a non-full write to the // PeerManager, we will eventually get notified that there is room in the socket to write // new bytes, which will generate an event. That event will be popped off the queue before @@ -199,12 +233,13 @@ impl Connection { // we shove a value into the channel which comes after we've reset the read_paused bool to // false. let (read_waker, read_receiver) = mpsc::channel(1); - let (reader, writer) = io::split(stream); + stream.set_nonblocking(true).unwrap(); + let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap()); (reader, write_receiver, read_receiver, Arc::new(Mutex::new(Self { - writer: Some(writer), event_notify, write_avail, read_waker, - read_paused: false, rl_requested_disconnect: false, + writer: Some(writer), event_notify, write_avail, read_waker, read_paused: false, + block_disconnect_socket: false, rl_requested_disconnect: false, id: ID_COUNTER.fetch_add(1, Ordering::AcqRel) }))) } @@ -218,7 +253,10 @@ impl Connection { /// not need to poll the provided future in order to make progress. /// /// See the module-level documentation for how to handle the event_notify mpsc::Sender. 
-pub fn setup_inbound(peer_manager: Arc>>, event_notify: mpsc::Sender<()>, stream: TcpStream) -> impl std::future::Future { +pub fn setup_inbound(peer_manager: Arc, Arc, Arc>>, event_notify: mpsc::Sender<()>, stream: StdTcpStream) -> impl std::future::Future where + CMH: ChannelMessageHandler + 'static + Send + Sync, + RMH: RoutingMessageHandler + 'static + Send + Sync, + L: Logger + 'static + ?Sized + Send + Sync { let (reader, write_receiver, read_receiver, us) = Connection::new(event_notify, stream); #[cfg(debug_assertions)] let last_us = Arc::clone(&us); @@ -257,19 +295,39 @@ pub fn setup_inbound(peer_manager: Arc(peer_manager: Arc>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) -> impl std::future::Future { - let (reader, write_receiver, read_receiver, us) = Connection::new(event_notify, stream); +pub fn setup_outbound(peer_manager: Arc, Arc, Arc>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future where + CMH: ChannelMessageHandler + 'static + Send + Sync, + RMH: RoutingMessageHandler + 'static + Send + Sync, + L: Logger + 'static + ?Sized + Send + Sync { + let (reader, mut write_receiver, read_receiver, us) = Connection::new(event_notify, stream); #[cfg(debug_assertions)] let last_us = Arc::clone(&us); let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) { Some(tokio::spawn(async move { - if SocketDescriptor::new(us.clone()).send_data(&initial_send, true) != initial_send.len() { - // We should essentially always have enough room in a TCP socket buffer to send the - // initial 10s of bytes, if not, just give up as hopeless. - eprintln!("Failed to write first full message to socket!"); - peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us))); - } else { + // We should essentially always have enough room in a TCP socket buffer to send the + // initial 10s of bytes. However, tokio running in single-threaded mode will always + // fail writes and wake us back up later to write. Thus, we handle a single + // std::task::Poll::Pending but still expect to write the full set of bytes at once + // and use a relatively tight timeout. + if let Ok(Ok(())) = tokio::time::timeout(Duration::from_millis(100), async { + loop { + match SocketDescriptor::new(us.clone()).send_data(&initial_send, true) { + v if v == initial_send.len() => break Ok(()), + 0 => { + write_receiver.recv().await; + // In theory we could check for if we've been instructed to disconnect + // the peer here, but its OK to just skip it - we'll check for it in + // schedule_read prior to any relevant calls into RL. 
+ }, + _ => { + eprintln!("Failed to write first full message to socket!"); + peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us))); + break Err(()); + } + } + } + }).await { Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver).await; } })) @@ -309,8 +367,11 @@ pub fn setup_outbound(peer_manager: Arc(peer_manager: Arc>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) -> Option> { - if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), TcpStream::connect(&addr)).await { +pub async fn connect_outbound(peer_manager: Arc, Arc, Arc>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) -> Option> where + CMH: ChannelMessageHandler + 'static + Send + Sync, + RMH: RoutingMessageHandler + 'static + Send + Sync, + L: Logger + 'static + ?Sized + Send + Sync { + if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await { Some(setup_outbound(peer_manager, event_notify, their_node_id, stream)) } else { None } } @@ -332,7 +393,7 @@ fn wake_socket_waker(orig_ptr: *const ()) { } fn wake_socket_waker_by_ref(orig_ptr: *const ()) { let sender_ptr = orig_ptr as *const mpsc::Sender<()>; - let mut sender = unsafe { (*sender_ptr).clone() }; + let sender = unsafe { (*sender_ptr).clone() }; let _ = sender.try_send(()); } fn drop_socket_waker(orig_ptr: *const ()) { @@ -411,15 +472,18 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { } fn disconnect_socket(&mut self) { - let mut us = self.conn.lock().unwrap(); - us.rl_requested_disconnect = true; - us.read_paused = true; - // Wake up the sending thread, assuming it is still alive - let _ = us.write_avail.try_send(()); - // TODO: There's a race where we don't meet the requirements of disconnect_socket if the - // read task is about to call a PeerManager function (eg read_event or write_event). - // Ideally we need to release the us lock and block until we have confirmation from the - // read task that it has broken out of its main loop. 
+ { + let mut us = self.conn.lock().unwrap(); + us.rl_requested_disconnect = true; + us.read_paused = true; + // Wake up the sending thread, assuming it is still alive + let _ = us.write_avail.try_send(()); + // Happy-path return: + if !us.block_disconnect_socket { return; } + } + while self.conn.lock().unwrap().block_disconnect_socket { + thread::yield_now(); + } } } impl Clone for SocketDescriptor { @@ -448,11 +512,12 @@ mod tests { use lightning::ln::msgs::*; use lightning::ln::peer_handler::{MessageHandler, PeerManager}; use lightning::util::events::*; - use secp256k1::{Secp256k1, SecretKey, PublicKey}; + use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey}; use tokio::sync::mpsc; use std::mem; + use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -467,6 +532,7 @@ mod tests { expected_pubkey: PublicKey, pubkey_connected: mpsc::Sender<()>, pubkey_disconnected: mpsc::Sender<()>, + disconnected_flag: AtomicBool, msg_events: Mutex>, } impl RoutingMessageHandler for MsgHandler { @@ -474,9 +540,13 @@ mod tests { fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result { Ok(false) } fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result { Ok(false) } fn handle_htlc_fail_channel_update(&self, _update: &HTLCFailChannelUpdate) { } - fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, ChannelUpdate, ChannelUpdate)> { Vec::new() } + fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { Vec::new() } fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec { Vec::new() } - fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { false } + fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &Init) { } + fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) } + fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) } + fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) } + fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) } } impl ChannelMessageHandler for MsgHandler { fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &OpenChannel) {} @@ -484,7 +554,7 @@ mod tests { fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {} fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {} fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {} - fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &Shutdown) {} + fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {} fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {} fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {} fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateFulfillHTLC) {} @@ -494,8 +564,10 @@ mod tests { fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &RevokeAndACK) {} fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {} fn 
handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {} + fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {} fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) { if *their_node_id == self.expected_pubkey { + self.disconnected_flag.store(true, Ordering::SeqCst); self.pubkey_disconnected.clone().try_send(()).unwrap(); } } @@ -515,8 +587,7 @@ mod tests { } } - #[tokio::test(threaded_scheduler)] - async fn basic_connection_test() { + async fn do_basic_connection_test() { let secp_ctx = Secp256k1::new(); let a_key = SecretKey::from_slice(&[1; 32]).unwrap(); let b_key = SecretKey::from_slice(&[1; 32]).unwrap(); @@ -529,11 +600,12 @@ mod tests { expected_pubkey: b_pub, pubkey_connected: a_connected_sender, pubkey_disconnected: a_disconnected_sender, + disconnected_flag: AtomicBool::new(false), msg_events: Mutex::new(Vec::new()), }); let a_manager = Arc::new(PeerManager::new(MessageHandler { chan_handler: Arc::clone(&a_handler), - route_handler: Arc::clone(&a_handler) as Arc, + route_handler: Arc::clone(&a_handler), }, a_key.clone(), &[1; 32], Arc::new(TestLogger()))); let (b_connected_sender, mut b_connected) = mpsc::channel(1); @@ -542,11 +614,12 @@ mod tests { expected_pubkey: a_pub, pubkey_connected: b_connected_sender, pubkey_disconnected: b_disconnected_sender, + disconnected_flag: AtomicBool::new(false), msg_events: Mutex::new(Vec::new()), }); let b_manager = Arc::new(PeerManager::new(MessageHandler { chan_handler: Arc::clone(&b_handler), - route_handler: Arc::clone(&b_handler) as Arc, + route_handler: Arc::clone(&b_handler), }, b_key.clone(), &[2; 32], Arc::new(TestLogger()))); // We bind on localhost, hoping the environment is properly configured with a local @@ -562,8 +635,8 @@ mod tests { } else { panic!("Failed to bind to v4 localhost on common ports"); }; let (sender, _receiver) = mpsc::channel(2); - let fut_a = super::setup_outbound(Arc::clone(&a_manager), sender.clone(), b_pub, tokio::net::TcpStream::from_std(conn_a).unwrap()); - let fut_b = super::setup_inbound(b_manager, sender, tokio::net::TcpStream::from_std(conn_b).unwrap()); + let fut_a = super::setup_outbound(Arc::clone(&a_manager), sender.clone(), b_pub, conn_a); + let fut_b = super::setup_inbound(b_manager, sender, conn_b); tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap(); tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap(); @@ -571,14 +644,25 @@ mod tests { a_handler.msg_events.lock().unwrap().push(MessageSendEvent::HandleError { node_id: b_pub, action: ErrorAction::DisconnectPeer { msg: None } }); - assert!(a_disconnected.try_recv().is_err()); - assert!(b_disconnected.try_recv().is_err()); + assert!(!a_handler.disconnected_flag.load(Ordering::SeqCst)); + assert!(!b_handler.disconnected_flag.load(Ordering::SeqCst)); a_manager.process_events(); tokio::time::timeout(Duration::from_secs(10), a_disconnected.recv()).await.unwrap(); tokio::time::timeout(Duration::from_secs(1), b_disconnected.recv()).await.unwrap(); + assert!(a_handler.disconnected_flag.load(Ordering::SeqCst)); + assert!(b_handler.disconnected_flag.load(Ordering::SeqCst)); fut_a.await; fut_b.await; } + + #[tokio::test(flavor = "multi_thread")] + async fn basic_threaded_connection_test() { + do_basic_connection_test().await; + } + #[tokio::test] + async fn basic_unthreaded_connection_test() { + do_basic_connection_test().await; + } }
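The `block_disconnect_socket` flag introduced above implements a small handshake between the read task and `disconnect_socket()`: the read task sets the flag (under the connection mutex) just before calling into the `PeerManager`, clears it when the call returns, and `disconnect_socket()` sets `rl_requested_disconnect` and then spins with `thread::yield_now()` until the flag is clear. That way, once `disconnect_socket()` returns, no further `read_event`/`write_buffer_space_avail` calls can be in flight for that connection. A minimal, self-contained sketch of the same pattern, using a plain mutex-guarded pair of booleans and hypothetical names rather than the actual rust-lightning types:

use std::sync::{Arc, Mutex};
use std::thread;

#[derive(Default)]
struct ConnState {
    requested_disconnect: bool,   // analogous to rl_requested_disconnect
    handler_call_in_flight: bool, // analogous to block_disconnect_socket
}

fn read_task(conn: Arc<Mutex<ConnState>>) {
    for _ in 0..1_000 {
        {
            let mut c = conn.lock().unwrap();
            // Never start a new handler call once a disconnect was requested.
            if c.requested_disconnect { return; }
            c.handler_call_in_flight = true;
        }
        // ...call into the message handler here, without holding the lock...
        conn.lock().unwrap().handler_call_in_flight = false;
    }
}

fn disconnect(conn: &Arc<Mutex<ConnState>>) {
    conn.lock().unwrap().requested_disconnect = true;
    // Busy-wait (yielding) until any in-flight handler call has completed.
    while conn.lock().unwrap().handler_call_in_flight {
        thread::yield_now();
    }
}

fn main() {
    let conn = Arc::new(Mutex::new(ConnState::default()));
    let reader = { let conn = Arc::clone(&conn); thread::spawn(move || read_task(conn)) };
    disconnect(&conn); // after this returns, no handler calls remain in flight
    reader.join().unwrap();
}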
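The reworked `setup_outbound` above wraps the initial handshake write in a `tokio::time::timeout`: a zero-length write (which tokio's current-thread scheduler can report even when the kernel buffer has room) is no longer treated as fatal, but is retried after the write-available channel fires, with the whole attempt bounded by a 100ms deadline. A rough, self-contained illustration of that retry-until-deadline shape, with a hypothetical `try_send` standing in for `SocketDescriptor::send_data`:

use std::time::Duration;
use tokio::sync::mpsc;

// Stand-in for send_data(): returns how many bytes were accepted, where 0 means
// "the write returned Pending; wait for a write-available wakeup and retry".
fn try_send(attempt: &mut u32, msg: &[u8]) -> usize {
    *attempt += 1;
    if *attempt < 3 { 0 } else { msg.len() }
}

#[tokio::main]
async fn main() {
    let (wake_tx, mut wake_rx) = mpsc::channel::<()>(1);
    // Pretend the reactor wakes us a little while after each failed write.
    tokio::spawn(async move {
        loop {
            tokio::time::sleep(Duration::from_millis(10)).await;
            if wake_tx.send(()).await.is_err() { break; }
        }
    });

    let msg = b"initial handshake bytes";
    let mut attempt = 0;
    let res = tokio::time::timeout(Duration::from_millis(100), async {
        loop {
            match try_send(&mut attempt, msg) {
                n if n == msg.len() => break Ok(()),
                0 => { wake_rx.recv().await; } // wait for write-ready, then retry
                _ => break Err(()),
            }
        }
    }).await;
    // Only proceed to schedule_read()-style processing on a clean, in-time write.
    assert!(matches!(res, Ok(Ok(()))));
}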