Combine common fields of `OpenChannel` & `OpenChannelV2` into struct
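
`msgs::OpenChannel` and `msgs::OpenChannelV2` now carry the fields shared by both
messages in a single nested struct, so handlers in this file read
`msg.common_fields.temporary_channel_id` rather than `msg.temporary_channel_id`.
As an abridged sketch of the shape these call sites assume (the struct itself lives
in the accompanying `lightning/src/ln/msgs.rs` change, not in this file's hunks;
names and the trimmed field list here are illustrative, only
`common_fields.temporary_channel_id` is confirmed by the hunks below):

    // Sketch only: fields common to open_channel and open_channel2.
    // Types (ChainHash, ChannelId) are those already imported by msgs.rs.
    pub struct CommonOpenChannelFields {
        pub chain_hash: ChainHash,
        pub temporary_channel_id: ChannelId,
        pub funding_satoshis: u64,
        // ... remaining shared fields (dust limit, HTLC limits, basepoints, channel_type, ...)
    }

    pub struct OpenChannel {
        pub common_fields: CommonOpenChannelFields,
        // v1-only fields (e.g. push_msat, channel_reserve_satoshis) stay on the outer struct.
    }

The peer_handler.rs hunks below are the mechanical fallout of that move, plus a few
unrelated tidy-ups (logger context without unwrap, typo fixes, import regrouping).
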
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index a4e0042414f8996f95c08deb72d7a578322fff97..0b752161b6aff18dee9c2a8982ee25545d85d2b8 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
-use crate::sign::{KeysManager, NodeSigner, Recipient};
+use crate::sign::{NodeSigner, Recipient};
 use crate::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
-#[cfg(not(c_bindings))]
-use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use crate::util::ser::{VecWriter, Writeable, Writer};
 use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
 use crate::ln::wire;
 use crate::ln::wire::{Encode, Type};
-#[cfg(not(c_bindings))]
-use crate::onion_message::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-use crate::onion_message::{CustomOnionMessageHandler, OffersMessage, OffersMessageHandler, OnionMessageContents, PendingOnionMessage};
-use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, NodeAlias};
+use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::onion_message::packet::OnionMessageContents;
+use crate::routing::gossip::{NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
 use crate::util::logger::{Logger, WithContext};
 use crate::util::string::PrintableString;
@@ -41,12 +39,21 @@ use crate::util::string::PrintableString;
 use crate::prelude::*;
 use crate::io;
 use alloc::collections::VecDeque;
-use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock};
+use crate::sync::{Mutex, MutexGuard, FairRwLock};
 use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
 use core::convert::Infallible;
-#[cfg(feature = "std")] use std::error;
+#[cfg(feature = "std")]
+use std::error;
+#[cfg(not(c_bindings))]
+use {
+       crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
+       crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger},
+       crate::routing::gossip::{NetworkGraph, P2PGossipSync},
+       crate::sign::KeysManager,
+       crate::sync::Arc,
+};
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
@@ -215,7 +222,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        // Any messages which are related to a specific channel generate an error message to let the
        // peer know we don't care about channels.
        fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) {
-               ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+               ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
        }
        fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
                ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
@@ -296,6 +303,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
                features.set_channel_type_optional();
                features.set_scid_privacy_optional();
                features.set_zero_conf_optional();
+               features.set_route_blinding_optional();
                features
        }
 
@@ -307,7 +315,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        }
 
        fn handle_open_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
-               ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+               ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
        }
 
        fn handle_accept_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
@@ -377,7 +385,7 @@ pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref, CustomM: Deref> where
        /// A message handler which handles onion messages. This should generally be an
        /// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
        ///
-       /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+       /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
        pub onion_message_handler: OM,
 
        /// A message handler which handles custom messages. The only LDK-provided implementation is
@@ -1262,7 +1270,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Append a message to a peer's pending outbound/write buffer
        fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
-               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+               let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                if is_gossip_msg(message.type_id()) {
                        log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
                } else {
@@ -1367,7 +1375,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                                macro_rules! insert_node_id {
                                                        () => {
-                                                               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+                                                               let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                                                match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
                                                                        hash_map::Entry::Occupied(e) => {
                                                                                log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1447,7 +1455,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peer.pending_read_buffer.resize(18, 0);
                                                                        peer.pending_read_is_header = true;
 
-                                                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+                                                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                                                        let message = match message_result {
                                                                                Ok(x) => x,
                                                                                Err(e) => {
@@ -1600,7 +1608,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                }
 
                if let wire::Message::GossipTimestampFilter(_msg) = message {
-                       // When supporting gossip messages, start inital gossip sync only after we receive
+                       // When supporting gossip messages, start initial gossip sync only after we receive
                        // a GossipTimestampFilter
                        if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() &&
                                !peer_lock.sent_gossip_timestamp_filter {
@@ -1825,13 +1833,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
                                        }
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1853,13 +1861,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
                                        }
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1881,13 +1889,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
                                        }
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                                       let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1983,22 +1991,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
-                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       &msg.temporary_channel_id);
+                                                                       &msg.common_fields.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
-                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       &msg.temporary_channel_id);
+                                                                       &msg.common_fields.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
                                                        log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id,
-                                                                       log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+                                                                       ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index));
                                                        // TODO: If the peer is gone we should generate a DiscardFunding event
                                                        // indicating to the wallet that they should just throw away this funding transaction
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
@@ -2207,7 +2215,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        log_pubkey!(node_id));
                                                                        }
                                                                        // We do not have the peers write lock, so we just store that we're
-                                                                       // about to disconenct the peer and do it after we finish
+                                                                       // about to disconnect the peer and do it after we finish
                                                                        // processing most messages.
                                                                        let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
                                                                        peers_to_disconnect.insert(node_id, msg);
@@ -2216,7 +2224,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id), msg.data);
                                                                        // We do not have the peers write lock, so we just store that we're
-                                                                       // about to disconenct the peer and do it after we finish
+                                                                       // about to disconnect the peer and do it after we finish
                                                                        // processing most messages.
                                                                        peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
                                                                },