[lightning-net-tokio] Fix race-y unwrap fetching peer socket address
author     Matt Corallo <git@bluematt.me>
           Sun, 24 Apr 2022 20:57:53 +0000 (20:57 +0000)
committer  Matt Corallo <git@bluematt.me>
           Sun, 24 Apr 2022 21:04:54 +0000 (21:04 +0000)
I recently saw the following panic on one of my test nodes:

```
thread 'tokio-runtime-worker' panicked at 'called `Result::unwrap()`
    on an `Err` value: Os { code: 107, kind: NotConnected, message:
    "Transport endpoint is not connected" }',
    rust-lightning/lightning-net-tokio/src/lib.rs:250:38
```

Presumably what happened is that the connection was somehow closed
between our accepting it and our starting to process it. While this
is a somewhat surprising race, it's clearly reachable. The fix
proposed here is quite trivial: simply don't `unwrap` when trying to
fetch our peer's socket address; instead, treat the peer address as
`None` and discover the disconnection later when we go to read.
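
For illustration only (not part of the commit), here is a minimal sketch of
the same defensive pattern using only std types; the actual change below
additionally maps the recovered address into lightning's `NetAddress`:

```rust
use std::net::{SocketAddr, TcpListener, TcpStream};

// Sketch only: treat a failed peer_addr() lookup as "address unknown"
// rather than panicking. std's blocking TcpStream stands in for the
// stream handled by lightning-net-tokio.
fn remote_addr_or_none(stream: &TcpStream) -> Option<SocketAddr> {
    // peer_addr() returns Err (e.g. os error 107, NotConnected) if the
    // peer already hung up between accept() and this call; .ok() turns
    // that into None so the disconnection is discovered on a later read.
    stream.peer_addr().ok()
}

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let _client = TcpStream::connect(listener.local_addr()?)?;
    let (accepted, _) = listener.accept()?;
    // Normally Some(addr); None if the connection died in the meantime.
    println!("remote address: {:?}", remote_addr_or_none(&accepted));
    Ok(())
}
```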

lightning-net-tokio/src/lib.rs

index a9fd861bc846e440fd8ff54ab31deedf622aa8e6..5e1aa40b33968c831ec88c55d71c2ac8f8835a64 100644
@@ -85,7 +85,6 @@ use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddre
 use lightning::util::logger::Logger;
 
 use std::task;
-use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
@@ -212,6 +211,20 @@ impl Connection {
        }
 }
 
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+       match stream.peer_addr() {
+               Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+                       addr: sockaddr.ip().octets(),
+                       port: sockaddr.port(),
+               }),
+               Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+                       addr: sockaddr.ip().octets(),
+                       port: sockaddr.port(),
+               }),
+               Err(_) => None,
+       }
+}
+
 /// Process incoming messages and feed outgoing messages on the provided socket generated by
 /// accepting an incoming connection.
 ///
@@ -223,21 +236,12 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManag
                RMH: RoutingMessageHandler + 'static + Send + Sync,
                L: Logger + 'static + ?Sized + Send + Sync,
                UMH: CustomMessageHandler + 'static + Send + Sync {
-       let ip_addr = stream.peer_addr().unwrap();
+       let remote_addr = get_addr_from_stream(&stream);
        let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
 
-       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-       }) {
+       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
                Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
        } else {
                // Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -274,20 +278,11 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerMana
                RMH: RoutingMessageHandler + 'static + Send + Sync,
                L: Logger + 'static + ?Sized + Send + Sync,
                UMH: CustomMessageHandler + 'static + Send + Sync {
-       let ip_addr = stream.peer_addr().unwrap();
+       let remote_addr = get_addr_from_stream(&stream);
        let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
-       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-       }) {
+       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
                Some(tokio::spawn(async move {
                        // We should essentially always have enough room in a TCP socket buffer to send the
                        // initial 10s of bytes. However, tokio running in single-threaded mode will always