Merge pull request #2842 from jkczyz/2024-01-fix-peer-handler-unwrap
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 6a935acf00d7b6cbb1cebbf4e633db05d65db3b0..c3a50fd2490e5db0accd5454b4f2f2f8e61500ed 100644
@@ -18,7 +18,7 @@
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
-use crate::sign::{KeysManager, NodeSigner, Recipient};
+use crate::sign::{NodeSigner, Recipient};
 use crate::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
@@ -31,9 +31,11 @@ use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, Mes
 use crate::ln::wire;
 use crate::ln::wire::{Encode, Type};
 #[cfg(not(c_bindings))]
-use crate::onion_message::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-use crate::onion_message::{CustomOnionMessageHandler, OffersMessage, OffersMessageHandler, OnionMessageContents, PendingOnionMessage};
-use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, NodeAlias};
+use crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
+use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::onion_message::packet::OnionMessageContents;
+use crate::routing::gossip::{NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
 use crate::util::logger::{Logger, WithContext};
 use crate::util::string::PrintableString;
@@ -41,12 +43,19 @@ use crate::util::string::PrintableString;
 use crate::prelude::*;
 use crate::io;
 use alloc::collections::VecDeque;
-use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock};
+use crate::sync::{Mutex, MutexGuard, FairRwLock};
 use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
 use core::convert::Infallible;
-#[cfg(feature = "std")] use std::error;
+#[cfg(feature = "std")]
+use std::error;
+#[cfg(not(c_bindings))]
+use {
+       crate::routing::gossip::{NetworkGraph, P2PGossipSync},
+       crate::sign::KeysManager,
+       crate::sync::Arc,
+};
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
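The imports gated here are presumably only referenced by the `cfg(not(c_bindings))` `SimpleArc*`/`SimpleRef*` aliases, so a single attribute over a grouped `use` replaces gating each line separately. A minimal sketch of the same pattern, with hypothetical modules rather than rust-lightning's:

// Hypothetical stand-in modules for the sketch.
mod gossip {
    pub struct NetworkGraph;
    pub struct P2PGossipSync;
}
mod sign {
    pub struct KeysManager;
}

// One cfg attribute gates the whole grouped `use`, so items needed only by
// the non-c_bindings build are imported together.
#[cfg(not(c_bindings))]
#[allow(unused_imports)] // in real code these back cfg-gated type aliases
use {
    crate::gossip::{NetworkGraph, P2PGossipSync},
    crate::sign::KeysManager,
};

fn main() {}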
@@ -296,6 +305,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
                features.set_channel_type_optional();
                features.set_scid_privacy_optional();
                features.set_zero_conf_optional();
+               features.set_route_blinding_optional();
                features
        }
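The new line advertises route blinding as optional; an `_optional` setter flips the odd bit of the BOLT 9 feature pair, so peers that do not understand the feature can still connect. A hedged sketch, assuming the `lightning` crate at the version of this PR as a dependency and using only the setters visible in this hunk (the real method sets further bits not shown here):

use lightning::ln::features::InitFeatures;

fn erroring_handler_features() -> InitFeatures {
    // Mirrors the tail of provided_init_features() above; each call sets
    // the odd ("optional") bit for its feature.
    let mut features = InitFeatures::empty();
    features.set_channel_type_optional();
    features.set_scid_privacy_optional();
    features.set_zero_conf_optional();
    features.set_route_blinding_optional();
    features
}

fn main() {
    let _features = erroring_handler_features();
}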
 
@@ -377,7 +387,7 @@ pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref, CustomM: Deref> where
        /// A message handler which handles onion messages. This should generally be an
        /// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
        ///
-       /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+       /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
        pub onion_message_handler: OM,
 
        /// A message handler which handles custom messages. The only LDK-provided implementation is
@@ -1262,7 +1272,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Append a message to a peer's pending outbound/write buffer
        fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
-               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+               let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                if is_gossip_msg(message.type_id()) {
                        log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
                } else {
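This is the unwrap the PR title refers to: `their_node_id` is `None` until the peer identifies itself during the noise handshake, so building the log context with `Option::map` avoids a panic for a not-yet-identified peer. A minimal sketch of the pattern, with hypothetical types rather than LDK's:

#[derive(Clone, Copy)]
struct PublicKey([u8; 33]); // stand-in for secp256k1::PublicKey

struct Peer {
    // None until the peer has identified itself during the handshake.
    their_node_id: Option<(PublicKey, [u8; 32])>,
}

struct LogContext {
    peer_id: Option<PublicKey>,
}

fn context_for(peer: &Peer) -> LogContext {
    // Before: Some(peer.their_node_id.unwrap().0) -- panics while None.
    // After:  peer.their_node_id.map(|p| p.0)     -- the context simply
    //         carries no peer id yet.
    LogContext { peer_id: peer.their_node_id.map(|p| p.0) }
}

fn main() {
    let pre_handshake = Peer { their_node_id: None };
    assert!(context_for(&pre_handshake).peer_id.is_none()); // no panic
}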
@@ -1367,7 +1377,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                                macro_rules! insert_node_id {
                                                        () => {
-                                                               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+                                                               let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                                                match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
                                                                        hash_map::Entry::Occupied(e) => {
                                                                                log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1447,7 +1457,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peer.pending_read_buffer.resize(18, 0);
                                                                        peer.pending_read_is_header = true;
 
-                                                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+                                                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                                                        let message = match message_result {
                                                                                Ok(x) => x,
                                                                                Err(e) => {
@@ -1600,7 +1610,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                }
 
                if let wire::Message::GossipTimestampFilter(_msg) = message {
-                       // When supporting gossip messages, start inital gossip sync only after we receive
+                       // When supporting gossip messages, start initial gossip sync only after we receive
                        // a GossipTimestampFilter
                        if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() &&
                                !peer_lock.sent_gossip_timestamp_filter {
@@ -1825,13 +1835,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
                                        }
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
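Besides switching to `Option::map`, the logger construction moves below the handshake check and the `debug_assert!`s, so no per-peer context is built for peers the loop skips. A minimal sketch of the reordering, with hypothetical types:

struct Peer {
    handshake_complete: bool,
    their_node_id: Option<[u8; 33]>,
    buffer_full: bool,
}

fn forward_broadcast(peers: &[Peer]) {
    for peer in peers {
        if !peer.handshake_complete {
            continue; // skipped peers never need a logging context
        }
        debug_assert!(peer.their_node_id.is_some());
        // Built only after the checks above, mirroring the moved line.
        let _log_peer = peer.their_node_id.map(|id| id[0]);
        if peer.buffer_full {
            continue; // the real loop logs "outbound buffer is full" here
        }
        // ... enqueue the broadcast message for this peer ...
    }
}

fn main() {
    forward_broadcast(&[Peer { handshake_complete: false, their_node_id: None, buffer_full: false }]);
}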
@@ -1853,13 +1863,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
                                        }
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1881,13 +1891,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
                                        }
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -2207,7 +2217,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        log_pubkey!(node_id));
                                                                        }
                                                                        // We do not have the peers write lock, so we just store that we're
-                                                                       // about to disconenct the peer and do it after we finish
+                                                                       // about to disconnect the peer and do it after we finish
                                                                        // processing most messages.
                                                                        let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
                                                                        peers_to_disconnect.insert(node_id, msg);
@@ -2216,7 +2226,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id), msg.data);
                                                                        // We do not have the peers write lock, so we just store that we're
-                                                                       // about to disconenct the peer and do it after we finish
+                                                                       // about to disconnect the peer and do it after we finish
                                                                        // processing most messages.
                                                                        peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
                                                                },
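The comments in these two hunks describe the same deferral: event processing holds only the read side of the peers lock, so disconnections are first recorded in `peers_to_disconnect` and carried out once processing is finished. A minimal sketch of that shape, with hypothetical types and `std` locking in place of LDK's:

use std::collections::HashMap;
use std::sync::RwLock;

fn process_events(peers: &RwLock<HashMap<u64, String>>) {
    let mut peers_to_disconnect: HashMap<u64, &'static str> = HashMap::new();
    {
        let peers_read = peers.read().unwrap();
        for (node_id, _peer) in peers_read.iter() {
            // ... handle messages; on a fatal error, only record the intent ...
            if *node_id % 2 == 1 {
                peers_to_disconnect.insert(*node_id, "example protocol error");
            }
        }
    } // read lock dropped here, before the write lock is taken
    let mut peers_write = peers.write().unwrap();
    for (node_id, _reason) in peers_to_disconnect {
        peers_write.remove(&node_id); // the deferred disconnects happen now
    }
}

fn main() {
    let peers = RwLock::new(HashMap::from([(1, "alice".to_string()), (2, "bob".to_string())]));
    process_events(&peers);
    assert_eq!(peers.read().unwrap().len(), 1);
}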
@@ -2488,7 +2498,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
        const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
-       #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
        // smaller than 100:
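The `#[deny(const_err)]` attribute can be dropped because the `const_err` lint no longer exists; constant-evaluation failures are hard errors in current Rust, so the underflow check still fails the build on its own. A minimal sketch of the same compile-time assertion, with hypothetical constant names and an example stand-in for `SocketAddress::MAX_LEN`:

// 55 is an example stand-in for SocketAddress::MAX_LEN.
const HALF_MESSAGE_IS_ADDRS: u32 = u16::MAX as u32 / (55 + 1) / 2;

// If HALF_MESSAGE_IS_ADDRS were below 100, this subtraction would underflow
// during constant evaluation and the crate would fail to build.
#[allow(dead_code)]
const MUST_FIT_100_ADDRS: u32 = HALF_MESSAGE_IS_ADDRS - 100;

fn main() {
    println!("half a message holds {} addresses", HALF_MESSAGE_IS_ADDRS);
}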