From: Matt Corallo
Date: Sun, 24 Apr 2022 20:57:53 +0000 (+0000)
Subject: [lightning-net-tokio] Fix race-y unwrap fetching peer socket address
X-Git-Tag: v0.0.107~40^2~1
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=fca67bcaa4c7bf36da7cebf142b0baf33488909d;p=rust-lightning

[lightning-net-tokio] Fix race-y unwrap fetching peer socket address

I recently saw the following panic on one of my test nodes:

```
thread 'tokio-runtime-worker' panicked at 'called `Result::unwrap()` on an
`Err` value: Os { code: 107, kind: NotConnected, message: "Transport endpoint
is not connected" }', rust-lightning/lightning-net-tokio/src/lib.rs:250:38
```

Presumably what happened is that the connection was somehow closed in between
us accepting it and us going to start processing it. While this is a somewhat
surprising race, it's clearly reachable. The fix proposed here is quite
trivial - simply don't `unwrap` when trying to fetch our peer's socket
address; instead, treat the peer address as `None` and discover the
disconnection later when we go to read.
---

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index a9fd861bc..5e1aa40b3 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -85,7 +85,6 @@ use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddre
 use lightning::util::logger::Logger;
 
 use std::task;
-use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
@@ -212,6 +211,20 @@ impl Connection {
 	}
 }
 
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+	match stream.peer_addr() {
+		Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+			addr: sockaddr.ip().octets(),
+			port: sockaddr.port(),
+		}),
+		Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+			addr: sockaddr.ip().octets(),
+			port: sockaddr.port(),
+		}),
+		Err(_) => None,
+	}
+}
+
 /// Process incoming messages and feed outgoing messages on the provided socket generated by
 /// accepting an incoming connection.
 ///
@@ -223,21 +236,12 @@ pub fn setup_inbound(peer_manager: Arc
-	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-		IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-		IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-	}) {
+	let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
 	} else {
 		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -274,20 +278,11 @@ pub fn setup_outbound(peer_manager: Arc
-	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-		IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-		IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-			addr: ip.octets(),
-			port: ip_addr.port(),
-		}),
-	}) {
+	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(async move {
 			// We should essentially always have enough room in a TCP socket buffer to send the
 			// initial 10s of bytes. However, tokio running in single-threaded mode will always
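
For illustration, a minimal, self-contained sketch of the behaviour the new
helper gives us. The `NetAddress` enum here is a simplified stand-in for
`lightning::ln::msgs::NetAddress`, and the listener/connect dance is only one
way to end up holding an accepted socket whose peer has already gone away -
the situation the old `unwrap` tripped over:

```
use std::net::{SocketAddr, TcpListener, TcpStream as StdTcpStream};

// Simplified stand-in for `lightning::ln::msgs::NetAddress` (illustration only).
#[derive(Debug)]
enum NetAddress {
	IPv4 { addr: [u8; 4], port: u16 },
	IPv6 { addr: [u8; 16], port: u16 },
}

// Mirrors the helper added in this commit: on any error (e.g. ENOTCONN because
// the peer already hung up) we simply report no address instead of panicking.
fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
	match stream.peer_addr() {
		Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
			addr: sockaddr.ip().octets(),
			port: sockaddr.port(),
		}),
		Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
			addr: sockaddr.ip().octets(),
			port: sockaddr.port(),
		}),
		Err(_) => None,
	}
}

fn main() -> std::io::Result<()> {
	let listener = TcpListener::bind("127.0.0.1:0")?;
	let addr = listener.local_addr()?;

	// Connect and immediately drop the client side, so the accepted socket may
	// already be dead by the time we inspect it - the race described above.
	drop(StdTcpStream::connect(addr)?);
	let (accepted, _) = listener.accept()?;

	// Depending on timing this can still be `Some(..)` (the kernel often still
	// knows the peer address), but it can never panic the way a bare
	// `stream.peer_addr().unwrap()` could.
	println!("peer address: {:?}", get_addr_from_stream(&accepted));
	Ok(())
}
```

Whether `peer_addr()` actually fails in that window is timing- and OS-dependent,
which is why the helper maps any error to `None` rather than asserting it
cannot happen.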