Merge pull request #2226 from alecchendev/2023-04-persist-network-graph-on-rgs
lightning/src/ln/peer_handler.rs
index 4cbe2a1bf2b0a4a93e2fd8a1d110ef418cf454ef..a20b316eb0a93e997a162e9857ef3cd60ad02373 100644
@@ -17,7 +17,8 @@
 
 use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
-use crate::chain::keysinterface::{KeysManager, NodeSigner, Recipient};
+use crate::sign::{KeysManager, NodeSigner, Recipient};
+use crate::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
@@ -27,9 +28,8 @@ use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
 use crate::ln::wire;
 use crate::ln::wire::Encode;
 use crate::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId};
+use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
-use crate::util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
 use crate::util::logger::Logger;
 
 use crate::prelude::*;
@@ -46,17 +46,38 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
 use bitcoin::hashes::{HashEngine, Hash};
 
-/// Handler for BOLT1-compliant messages.
+/// A handler provided to [`PeerManager`] for reading and handling custom messages.
+///
+/// [BOLT 1] specifies a custom message type range for use with experimental or application-specific
+/// messages. `CustomMessageHandler` allows for user-defined handling of such types. See the
+/// [`lightning_custom_message`] crate for tools useful in composing more than one custom handler.
+///
+/// [BOLT 1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
+/// [`lightning_custom_message`]: https://docs.rs/lightning_custom_message/latest/lightning_custom_message
 pub trait CustomMessageHandler: wire::CustomMessageReader {
-       /// Called with the message type that was received and the buffer to be read.
-       /// Can return a `MessageHandlingError` if the message could not be handled.
+       /// Handles the given message sent from `sender_node_id`, possibly producing messages for
+       /// [`CustomMessageHandler::get_and_clear_pending_msg`] to return and thus for [`PeerManager`]
+       /// to send.
        fn handle_custom_message(&self, msg: Self::CustomMessage, sender_node_id: &PublicKey) -> Result<(), LightningError>;
 
-       /// Gets the list of pending messages which were generated by the custom message
-       /// handler, clearing the list in the process. The first tuple element must
-       /// correspond to the intended recipients node ids. If no connection to one of the
-       /// specified node does not exist, the message is simply not sent to it.
+       /// Returns the list of pending messages that were generated by the handler, clearing the list
+       /// in the process. Each message is paired with the node id of the intended recipient. If no
+       /// connection to the node exists, then the message is simply not sent.
        fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
+
+       /// Gets the node feature flags which this handler itself supports. All available handlers are
+       /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
+       /// which are broadcasted in our [`NodeAnnouncement`] message.
+       ///
+       /// [`NodeAnnouncement`]: crate::ln::msgs::NodeAnnouncement
+       fn provided_node_features(&self) -> NodeFeatures;
+
+       /// Gets the init feature flags which should be sent to the given peer. All available handlers
+       /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`]
+       /// which are sent in our [`Init`] message.
+       ///
+       /// [`Init`]: crate::ln::msgs::Init
+       fn provided_init_features(&self, their_node_id: &PublicKey) -> InitFeatures;
 }
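
As an illustration of the expanded trait (a sketch, not part of this patch), a no-op handler mirroring `IgnoringMessageHandler` below could implement all four methods. The struct name is hypothetical, and the `wire::CustomMessageReader` signature and the `Infallible` custom-message type are recalled from elsewhere in the crate rather than shown in this hunk:

    struct NoopCustomHandler;
    impl wire::CustomMessageReader for NoopCustomHandler {
        type CustomMessage = Infallible;
        // Returning `Ok(None)` tells the wire module this handler does not know the type.
        fn read<R: io::Read>(&self, _type_id: u16, _buf: &mut R)
            -> Result<Option<Infallible>, msgs::DecodeError> { Ok(None) }
    }
    impl CustomMessageHandler for NoopCustomHandler {
        // Never called, since `read` above never returns a message.
        fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: &PublicKey)
            -> Result<(), LightningError> { unreachable!() }
        fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Infallible)> { Vec::new() }
        // The two new methods: this handler advertises no extra feature bits.
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
            InitFeatures::empty()
        }
    }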
 
 /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
@@ -72,7 +93,7 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
        fn get_next_channel_announcement(&self, _starting_point: u64) ->
                Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
        fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<msgs::NodeAnnouncement> { None }
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
        fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
        fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -81,14 +102,15 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
                InitFeatures::empty()
        }
+       fn processing_queue_high(&self) -> bool { false }
 }
 impl OnionMessageProvider for IgnoringMessageHandler {
        fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
 }
 impl OnionMessageHandler for IgnoringMessageHandler {
        fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
-       fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
+       fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
                InitFeatures::empty()
@@ -141,6 +163,12 @@ impl CustomMessageHandler for IgnoringMessageHandler {
        }
 
        fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+       fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+
+       fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+               InitFeatures::empty()
+       }
 }
 
 /// A dummy struct which implements `ChannelMessageHandler` without having any channels.
@@ -222,8 +250,8 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        }
        // msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
        fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
-       fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
@@ -244,17 +272,63 @@ impl ChannelMessageHandler for ErroringMessageHandler {
                features.set_zero_conf_optional();
                features
        }
+
+       fn handle_open_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+       }
+
+       fn handle_accept_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+       }
+
+       fn handle_tx_add_input(&self, their_node_id: &PublicKey, msg: &msgs::TxAddInput) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_add_output(&self, their_node_id: &PublicKey, msg: &msgs::TxAddOutput) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_remove_input(&self, their_node_id: &PublicKey, msg: &msgs::TxRemoveInput) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_remove_output(&self, their_node_id: &PublicKey, msg: &msgs::TxRemoveOutput) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_complete(&self, their_node_id: &PublicKey, msg: &msgs::TxComplete) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_signatures(&self, their_node_id: &PublicKey, msg: &msgs::TxSignatures) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_init_rbf(&self, their_node_id: &PublicKey, msg: &msgs::TxInitRbf) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_ack_rbf(&self, their_node_id: &PublicKey, msg: &msgs::TxAckRbf) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
+
+       fn handle_tx_abort(&self, their_node_id: &PublicKey, msg: &msgs::TxAbort) {
+               ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
+       }
 }
+
 impl Deref for ErroringMessageHandler {
        type Target = ErroringMessageHandler;
        fn deref(&self) -> &Self { self }
 }
 
 /// Provides references to trait impls which handle different types of messages.
-pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
-               CM::Target: ChannelMessageHandler,
-               RM::Target: RoutingMessageHandler,
-               OM::Target: OnionMessageHandler,
+pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref, CustomM: Deref> where
+       CM::Target: ChannelMessageHandler,
+       RM::Target: RoutingMessageHandler,
+       OM::Target: OnionMessageHandler,
+       CustomM::Target: CustomMessageHandler,
 {
        /// A message handler which handles messages specific to channels. Usually this is just a
        /// [`ChannelManager`] object or an [`ErroringMessageHandler`].
@@ -267,16 +341,22 @@ pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
        /// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
        pub route_handler: RM,
 
-       /// A message handler which handles onion messages. For now, this can only be an
-       /// [`IgnoringMessageHandler`].
+       /// A message handler which handles onion messages. This should generally be an
+       /// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
+       ///
+       /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
        pub onion_message_handler: OM,
+
+       /// A message handler which handles custom messages. The only LDK-provided implementation is
+       /// [`IgnoringMessageHandler`].
+       pub custom_message_handler: CustomM,
 }
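
For reference, a sketch (not part of this patch) of filling the struct entirely with the dummy handlers defined in this file; a real node would typically pass a `ChannelManager`, `P2PGossipSync`, and `OnionMessenger` for the first three fields:

    let message_handler = MessageHandler {
        chan_handler: ErroringMessageHandler::new(),
        route_handler: IgnoringMessageHandler{},
        onion_message_handler: IgnoringMessageHandler{},
        custom_message_handler: IgnoringMessageHandler{},
    };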
 
 /// Provides an object which can be used to send data to and which uniquely identifies a connection
 /// to a remote host. You will need to be able to generate multiple of these which meet Eq and
 /// implement Hash to meet the PeerManager API.
 ///
-/// For efficiency, Clone should be relatively cheap for this type.
+/// For efficiency, [`Clone`] should be relatively cheap for this type.
 ///
 /// Two descriptors may compare equal (by [`cmp::Eq`] and [`hash::Hash`]) as long as the original
 /// has been disconnected, the [`PeerManager`] has been informed of the disconnection (either by it
@@ -314,16 +394,7 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
 /// descriptor.
 #[derive(Clone)]
-pub struct PeerHandleError {
-       /// Used to indicate that we probably can't make any future connections to this peer (e.g.
-       /// because we required features that our peer was missing, or vice versa).
-       ///
-       /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
-       /// any channels with this peer or check for new versions of LDK.
-       ///
-       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-       pub no_connection_possible: bool,
-}
+pub struct PeerHandleError { }
 impl fmt::Debug for PeerHandleError {
        fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
                formatter.write_str("Peer Sent Invalid Data")
@@ -389,7 +460,15 @@ const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
 
 struct Peer {
        channel_encryptor: PeerChannelEncryptor,
-       their_node_id: Option<PublicKey>,
+       /// We cache a `NodeId` here to avoid serializing peers' keys every time we forward gossip
+       /// messages in `PeerManager`. Use `Peer::set_their_node_id` to modify this field.
+       their_node_id: Option<(PublicKey, NodeId)>,
+       /// The features provided in the peer's [`msgs::Init`] message.
+       ///
+       /// This is set only after we've processed the [`msgs::Init`] message and called relevant
+       /// `peer_connected` handler methods. Thus, this field is set *iff* we've finished our
+       /// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
+       /// check this).
        their_features: Option<InitFeatures>,
        their_net_address: Option<NetAddress>,
 
@@ -409,12 +488,27 @@ struct Peer {
        sync_status: InitSyncTracker,
 
        msgs_sent_since_pong: usize,
-       awaiting_pong_timer_tick_intervals: i8,
+       awaiting_pong_timer_tick_intervals: i64,
        received_message_since_timer_tick: bool,
        sent_gossip_timestamp_filter: bool,
+
+       /// Indicates we've received a `channel_announcement` since the last time we had
+       /// [`PeerManager::gossip_processing_backlogged`] set (or, really, that we've received a
+       /// `channel_announcement` at all - we set this unconditionally but unset it every time we
+       /// check if we're gossip-processing-backlogged).
+       received_channel_announce_since_backlogged: bool,
+
+       inbound_connection: bool,
 }
 
 impl Peer {
+       /// True after we've processed the [`msgs::Init`] message and called relevant `peer_connected`
+       /// handler methods. Thus, this implies we've finished our handshake and can talk to this peer
+       /// normally.
+       fn handshake_complete(&self) -> bool {
+               self.their_features.is_some()
+       }
+
        /// Returns true if the channel announcements/updates for the given channel should be
        /// forwarded to this peer.
        /// If we are sending our routing table to this peer and we have not yet sent channel
@@ -422,6 +516,7 @@ impl Peer {
        /// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
        /// sent the old versions, we should send the update, and so return true here.
        fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
+               if !self.handshake_complete() { return false; }
                if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
                        !self.sent_gossip_timestamp_filter {
                                return false;
@@ -435,6 +530,7 @@ impl Peer {
 
        /// Similar to the above, but for node announcements indexed by node_id.
        fn should_forward_node_announcement(&self, node_id: NodeId) -> bool {
+               if !self.handshake_complete() { return false; }
                if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
                        !self.sent_gossip_timestamp_filter {
                                return false;
@@ -448,8 +544,12 @@ impl Peer {
 
        /// Returns whether we should be reading bytes from this peer, based on whether its outbound
        /// buffer still has space and we don't need to pause reads to get some writes out.
-       fn should_read(&self) -> bool {
-               self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
+       fn should_read(&mut self, gossip_processing_backlogged: bool) -> bool {
+               if !gossip_processing_backlogged {
+                       self.received_channel_announce_since_backlogged = false;
+               }
+               self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE &&
+                       (!gossip_processing_backlogged || !self.received_channel_announce_since_backlogged)
        }
 
        /// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's
@@ -457,19 +557,20 @@ impl Peer {
        fn should_buffer_gossip_backfill(&self) -> bool {
                self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
                        && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+                       && self.handshake_complete()
        }
 
        /// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
        /// every time the peer's buffer may have been drained.
        fn should_buffer_onion_message(&self) -> bool {
-               self.pending_outbound_buffer.is_empty()
+               self.pending_outbound_buffer.is_empty() && self.handshake_complete()
                        && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
        }
 
        /// Determines if we should push additional gossip broadcast messages onto a peer's outbound
        /// buffer. This is checked every time the peer's buffer may have been drained.
        fn should_buffer_gossip_broadcast(&self) -> bool {
-               self.pending_outbound_buffer.is_empty()
+               self.pending_outbound_buffer.is_empty() && self.handshake_complete()
                        && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
        }
 
@@ -481,6 +582,10 @@ impl Peer {
                total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP ||
                        self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
        }
+
+       fn set_their_node_id(&mut self, node_id: PublicKey) {
+               self.their_node_id = Some((node_id, NodeId::from_pubkey(&node_id)));
+       }
 }
 
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
@@ -489,7 +594,7 @@ impl Peer {
 /// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
 /// issues such as overly long function definitions.
 ///
-/// (C-not exported) as `Arc`s don't make sense in bindings.
+/// This is not exported to bindings users as `Arc`s don't make sense in bindings.
 pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, Arc<SimpleArcOnionMessenger<L>>, Arc<L>, IgnoringMessageHandler, Arc<KeysManager>>;
 
 /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
@@ -499,9 +604,57 @@ pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArc
 /// But if this is not necessary, using a reference is more efficient. Defining these type aliases
 /// helps with issues such as long function definitions.
 ///
-/// (C-not exported) as general type aliases don't make sense in bindings.
+/// This is not exported to bindings users as general type aliases don't make sense in bindings.
 pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k, 'l, 'm, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'm, M, T, F, L>, &'f P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'i SimpleRefOnionMessenger<'j, 'k, L>, &'f L, IgnoringMessageHandler, &'c KeysManager>;
 
+
+/// A generic trait which is implemented for all [`PeerManager`]s. This makes bounding functions or
+/// structs on any [`PeerManager`] much simpler as only this trait is needed as a bound, rather
+/// than the full set of bounds on [`PeerManager`] itself.
+#[allow(missing_docs)]
+pub trait APeerManager {
+       type Descriptor: SocketDescriptor;
+       type CMT: ChannelMessageHandler + ?Sized;
+       type CM: Deref<Target=Self::CMT>;
+       type RMT: RoutingMessageHandler + ?Sized;
+       type RM: Deref<Target=Self::RMT>;
+       type OMT: OnionMessageHandler + ?Sized;
+       type OM: Deref<Target=Self::OMT>;
+       type LT: Logger + ?Sized;
+       type L: Deref<Target=Self::LT>;
+       type CMHT: CustomMessageHandler + ?Sized;
+       type CMH: Deref<Target=Self::CMHT>;
+       type NST: NodeSigner + ?Sized;
+       type NS: Deref<Target=Self::NST>;
+       /// Gets a reference to the underlying [`PeerManager`].
+       fn as_ref(&self) -> &PeerManager<Self::Descriptor, Self::CM, Self::RM, Self::OM, Self::L, Self::CMH, Self::NS>;
+}
+
+impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref>
+APeerManager for PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
+       CM::Target: ChannelMessageHandler,
+       RM::Target: RoutingMessageHandler,
+       OM::Target: OnionMessageHandler,
+       L::Target: Logger,
+       CMH::Target: CustomMessageHandler,
+       NS::Target: NodeSigner,
+{
+       type Descriptor = Descriptor;
+       type CMT = <CM as Deref>::Target;
+       type CM = CM;
+       type RMT = <RM as Deref>::Target;
+       type RM = RM;
+       type OMT = <OM as Deref>::Target;
+       type OM = OM;
+       type LT = <L as Deref>::Target;
+       type L = L;
+       type CMHT = <CMH as Deref>::Target;
+       type CMH = CMH;
+       type NST = <NS as Deref>::Target;
+       type NS = NS;
+       fn as_ref(&self) -> &PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> { self }
+}
+
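A sketch of the kind of signature this trait enables; the function is hypothetical and not part of this patch:

    // A single `APeerManager` bound replaces repeating every generic parameter and
    // `where` clause of `PeerManager` itself.
    fn drive_peer_manager<PM: APeerManager>(peer_manager: &PM) {
        peer_manager.as_ref().process_events();
    }
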
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
 /// socket events into messages which it passes on to its [`MessageHandler`].
 ///
@@ -514,10 +667,10 @@ pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k, 'l, 'm
 /// [`PeerManager`] functions related to the same connection must occur only in serial, making new
 /// calls only after previous ones have returned.
 ///
-/// Rather than using a plain PeerManager, it is preferable to use either a SimpleArcPeerManager
-/// a SimpleRefPeerManager, for conciseness. See their documentation for more details, but
-/// essentially you should default to using a SimpleRefPeerManager, and use a
-/// SimpleArcPeerManager when you require a PeerManager with a static lifetime, such as when
+/// Rather than using a plain [`PeerManager`], it is preferable to use either a [`SimpleArcPeerManager`]
+/// or a [`SimpleRefPeerManager`], for conciseness. See their documentation for more details, but
+/// essentially you should default to using a [`SimpleRefPeerManager`], and use a
+/// [`SimpleArcPeerManager`] when you require a `PeerManager` with a static lifetime, such as when
 /// you're using lightning-net-tokio.
 ///
 /// [`read_event`]: PeerManager::read_event
@@ -528,7 +681,7 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: D
                L::Target: Logger,
                CMH::Target: CustomMessageHandler,
                NS::Target: NodeSigner {
-       message_handler: MessageHandler<CM, RM, OM>,
+       message_handler: MessageHandler<CM, RM, OM, CMH>,
        /// Connection state for each connected peer - we have an outer read-write lock which is taken
        /// as read while we're doing processing for a peer and taken write when a peer is being added
        /// or removed.
@@ -558,10 +711,12 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: D
        last_node_announcement_serial: AtomicU32,
 
        ephemeral_key_midstate: Sha256Engine,
-       custom_message_handler: CMH,
 
        peer_counter: AtomicCounter,
 
+       gossip_processing_backlogged: AtomicBool,
+       gossip_processing_backlog_lifted: AtomicBool,
+
        node_signer: NS,
 
        logger: L,
@@ -602,7 +757,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref, NS: Deref> Pe
        /// `OnionMessageHandler`. No routing message handler is used and network graph messages are
        /// ignored.
        ///
-       /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+       /// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
        ///
        /// `current_time` is used as an always-increasing counter that survives across restarts and is
@@ -610,13 +765,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref, NS: Deref> Pe
        /// timestamp, however if it is not available a persistent counter that increases once per
        /// minute should suffice.
        ///
-       /// (C-not exported) as we can't export a PeerManager with a dummy route handler
+       /// This is not exported to bindings users as we can't export a PeerManager with a dummy route handler
        pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
                Self::new(MessageHandler {
                        chan_handler: channel_message_handler,
                        route_handler: IgnoringMessageHandler{},
                        onion_message_handler,
-               }, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{}, node_signer)
+                       custom_message_handler: IgnoringMessageHandler{},
+               }, current_time, ephemeral_random_data, logger, node_signer)
        }
 }
 
@@ -634,16 +790,17 @@ impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref, NS: Deref> PeerManager<D
        /// timestamp, however if it is not available a persistent counter that increases once per
        /// minute should suffice.
        ///
-       /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+       /// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
        ///
-       /// (C-not exported) as we can't export a PeerManager with a dummy channel handler
+       /// This is not exported to bindings users as we can't export a PeerManager with a dummy channel handler
        pub fn new_routing_only(routing_message_handler: RM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
                Self::new(MessageHandler {
                        chan_handler: ErroringMessageHandler::new(),
                        route_handler: routing_message_handler,
                        onion_message_handler: IgnoringMessageHandler{},
-               }, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{}, node_signer)
+                       custom_message_handler: IgnoringMessageHandler{},
+               }, current_time, ephemeral_random_data, logger, node_signer)
        }
 }
 
@@ -651,10 +808,10 @@ impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref, NS: Deref> PeerManager<D
 /// This works around `format!()` taking a reference to each argument, preventing
 /// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling
 /// due to lifetime errors.
-struct OptionalFromDebugger<'a>(&'a Option<PublicKey>);
+struct OptionalFromDebugger<'a>(&'a Option<(PublicKey, NodeId)>);
 impl core::fmt::Display for OptionalFromDebugger<'_> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
-               if let Some(node_id) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) }
+               if let Some((node_id, _)) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) }
        }
 }
 
@@ -696,15 +853,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                CMH::Target: CustomMessageHandler,
                NS::Target: NodeSigner
 {
-       /// Constructs a new PeerManager with the given message handlers and node_id secret key
-       /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+       /// Constructs a new `PeerManager` with the given message handlers.
+       ///
+       /// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
        /// cryptographically secure random bytes.
        ///
        /// `current_time` is used as an always-increasing counter that survives across restarts and is
        /// incremented irregularly internally. In general it is best to simply use the current UNIX
        /// timestamp, however if it is not available a persistent counter that increases once per
        /// minute should suffice.
-       pub fn new(message_handler: MessageHandler<CM, RM, OM>, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH, node_signer: NS) -> Self {
+       pub fn new(message_handler: MessageHandler<CM, RM, OM, CMH>, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
                let mut ephemeral_key_midstate = Sha256::engine();
                ephemeral_key_midstate.input(ephemeral_random_data);
 
@@ -720,27 +878,33 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        blocked_event_processors: AtomicBool::new(false),
                        ephemeral_key_midstate,
                        peer_counter: AtomicCounter::new(),
+                       gossip_processing_backlogged: AtomicBool::new(false),
+                       gossip_processing_backlog_lifted: AtomicBool::new(false),
                        last_node_announcement_serial: AtomicU32::new(current_time),
                        logger,
-                       custom_message_handler,
                        node_signer,
                        secp_ctx,
                }
        }
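
A hedged sketch of the updated call: the custom message handler now travels inside `MessageHandler` rather than as a trailing argument. `channel_manager`, `gossip_sync`, `onion_messenger`, `now`, `ephemeral_bytes`, `logger`, and `keys_manager` are assumptions standing in for a real node's components:

    let peer_manager = PeerManager::new(MessageHandler {
        chan_handler: channel_manager,
        route_handler: gossip_sync,
        onion_message_handler: onion_messenger,
        custom_message_handler: IgnoringMessageHandler{},
    }, now, &ephemeral_bytes, logger, keys_manager);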
 
-       /// Get the list of node ids for peers which have completed the initial handshake.
+       /// Get a list of tuples mapping from node id to network addresses for peers which have
+       /// completed the initial handshake.
        ///
-       /// For outbound connections, this will be the same as the their_node_id parameter passed in to
-       /// new_outbound_connection, however entries will only appear once the initial handshake has
-       /// completed and we are sure the remote peer has the private key for the given node_id.
-       pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
+       /// For outbound connections, the [`PublicKey`] will be the same as the `their_node_id` parameter
+       /// passed in to [`Self::new_outbound_connection`]; however, entries will only appear once the initial
+       /// handshake has completed and we are sure the remote peer has the private key for the given
+       /// [`PublicKey`].
+       ///
+       /// The returned `Option`s will only be `Some` if an address had been previously given via
+       /// [`Self::new_outbound_connection`] or [`Self::new_inbound_connection`].
+       pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<NetAddress>)> {
                let peers = self.peers.read().unwrap();
                peers.values().filter_map(|peer_mutex| {
                        let p = peer_mutex.lock().unwrap();
-                       if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() {
+                       if !p.handshake_complete() {
                                return None;
                        }
-                       p.their_node_id
+                       Some((p.their_node_id.unwrap().0, p.their_net_address.clone()))
                }).collect()
        }
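
Callers now receive the peer's last known network address alongside its node id; a usage sketch, where `peer_manager` is an assumption:

    for (node_id, addr) in peer_manager.get_peer_node_ids() {
        match addr {
            // Only `Some` if an address was supplied when the connection was set up.
            Some(net_addr) => println!("peer {} connected via {:?}", node_id, net_addr),
            None => println!("peer {} connected (no known address)", node_id),
        }
    }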
 
@@ -751,7 +915,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
        }
 
-       /// Indicates a new outbound connection has been established to a node with the given node_id
+       fn init_features(&self, their_node_id: &PublicKey) -> InitFeatures {
+               self.message_handler.chan_handler.provided_init_features(their_node_id)
+                       | self.message_handler.route_handler.provided_init_features(their_node_id)
+                       | self.message_handler.onion_message_handler.provided_init_features(their_node_id)
+                       | self.message_handler.custom_message_handler.provided_init_features(their_node_id)
+       }
+
+       /// Indicates a new outbound connection has been established to a node with the given `node_id`
        /// and an optional remote network address.
        ///
        /// The remote network address adds the option to report a remote IP address back to a connecting
@@ -763,40 +934,49 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// Returns a small number of bytes to send to the remote node (currently always 50).
        ///
        /// Panics if descriptor is duplicative with some other descriptor which has not yet been
-       /// [`socket_disconnected()`].
+       /// [`socket_disconnected`].
        ///
-       /// [`socket_disconnected()`]: PeerManager::socket_disconnected
+       /// [`socket_disconnected`]: PeerManager::socket_disconnected
        pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
                let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
                let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
                let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
 
                let mut peers = self.peers.write().unwrap();
-               if peers.insert(descriptor, Mutex::new(Peer {
-                       channel_encryptor: peer_encryptor,
-                       their_node_id: None,
-                       their_features: None,
-                       their_net_address: remote_network_address,
-
-                       pending_outbound_buffer: LinkedList::new(),
-                       pending_outbound_buffer_first_msg_offset: 0,
-                       gossip_broadcast_buffer: LinkedList::new(),
-                       awaiting_write_event: false,
-
-                       pending_read_buffer,
-                       pending_read_buffer_pos: 0,
-                       pending_read_is_header: false,
-
-                       sync_status: InitSyncTracker::NoSyncRequested,
-
-                       msgs_sent_since_pong: 0,
-                       awaiting_pong_timer_tick_intervals: 0,
-                       received_message_since_timer_tick: false,
-                       sent_gossip_timestamp_filter: false,
-               })).is_some() {
-                       panic!("PeerManager driver duplicated descriptors!");
-               };
-               Ok(res)
+               match peers.entry(descriptor) {
+                       hash_map::Entry::Occupied(_) => {
+                               debug_assert!(false, "PeerManager driver duplicated descriptors!");
+                               Err(PeerHandleError {})
+                       },
+                       hash_map::Entry::Vacant(e) => {
+                               e.insert(Mutex::new(Peer {
+                                       channel_encryptor: peer_encryptor,
+                                       their_node_id: None,
+                                       their_features: None,
+                                       their_net_address: remote_network_address,
+
+                                       pending_outbound_buffer: LinkedList::new(),
+                                       pending_outbound_buffer_first_msg_offset: 0,
+                                       gossip_broadcast_buffer: LinkedList::new(),
+                                       awaiting_write_event: false,
+
+                                       pending_read_buffer,
+                                       pending_read_buffer_pos: 0,
+                                       pending_read_is_header: false,
+
+                                       sync_status: InitSyncTracker::NoSyncRequested,
+
+                                       msgs_sent_since_pong: 0,
+                                       awaiting_pong_timer_tick_intervals: 0,
+                                       received_message_since_timer_tick: false,
+                                       sent_gossip_timestamp_filter: false,
+
+                                       received_channel_announce_since_backlogged: false,
+                                       inbound_connection: false,
+                               }));
+                               Ok(res)
+                       }
+               }
        }
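
With the change above, a duplicated descriptor now surfaces as an `Err` (plus a debug assertion) instead of a panic. A caller-side sketch, where `their_node_id`, `descriptor`, and the helper names are assumptions:

    match peer_manager.new_outbound_connection(their_node_id, descriptor, None) {
        // The returned bytes (noise act one, currently 50 bytes) must be written to the
        // socket before any `read_event` calls are made for this descriptor.
        Ok(act_one) => outbound_write_queue.push(act_one),
        // The descriptor was already registered with the PeerManager; close the socket.
        Err(PeerHandleError { }) => close_socket(),
    }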
 
        /// Indicates a new inbound connection has been established to a node with an optional remote
@@ -811,45 +991,67 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// the connection immediately.
        ///
        /// Panics if descriptor is duplicative with some other descriptor which has not yet been
-       /// [`socket_disconnected()`].
+       /// [`socket_disconnected`].
        ///
-       /// [`socket_disconnected()`]: PeerManager::socket_disconnected
+       /// [`socket_disconnected`]: PeerManager::socket_disconnected
        pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
                let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.node_signer);
                let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
 
                let mut peers = self.peers.write().unwrap();
-               if peers.insert(descriptor, Mutex::new(Peer {
-                       channel_encryptor: peer_encryptor,
-                       their_node_id: None,
-                       their_features: None,
-                       their_net_address: remote_network_address,
-
-                       pending_outbound_buffer: LinkedList::new(),
-                       pending_outbound_buffer_first_msg_offset: 0,
-                       gossip_broadcast_buffer: LinkedList::new(),
-                       awaiting_write_event: false,
-
-                       pending_read_buffer,
-                       pending_read_buffer_pos: 0,
-                       pending_read_is_header: false,
-
-                       sync_status: InitSyncTracker::NoSyncRequested,
-
-                       msgs_sent_since_pong: 0,
-                       awaiting_pong_timer_tick_intervals: 0,
-                       received_message_since_timer_tick: false,
-                       sent_gossip_timestamp_filter: false,
-               })).is_some() {
-                       panic!("PeerManager driver duplicated descriptors!");
-               };
-               Ok(())
+               match peers.entry(descriptor) {
+                       hash_map::Entry::Occupied(_) => {
+                               debug_assert!(false, "PeerManager driver duplicated descriptors!");
+                               Err(PeerHandleError {})
+                       },
+                       hash_map::Entry::Vacant(e) => {
+                               e.insert(Mutex::new(Peer {
+                                       channel_encryptor: peer_encryptor,
+                                       their_node_id: None,
+                                       their_features: None,
+                                       their_net_address: remote_network_address,
+
+                                       pending_outbound_buffer: LinkedList::new(),
+                                       pending_outbound_buffer_first_msg_offset: 0,
+                                       gossip_broadcast_buffer: LinkedList::new(),
+                                       awaiting_write_event: false,
+
+                                       pending_read_buffer,
+                                       pending_read_buffer_pos: 0,
+                                       pending_read_is_header: false,
+
+                                       sync_status: InitSyncTracker::NoSyncRequested,
+
+                                       msgs_sent_since_pong: 0,
+                                       awaiting_pong_timer_tick_intervals: 0,
+                                       received_message_since_timer_tick: false,
+                                       sent_gossip_timestamp_filter: false,
+
+                                       received_channel_announce_since_backlogged: false,
+                                       inbound_connection: true,
+                               }));
+                               Ok(())
+                       }
+               }
+       }
+
+       fn peer_should_read(&self, peer: &mut Peer) -> bool {
+               peer.should_read(self.gossip_processing_backlogged.load(Ordering::Relaxed))
        }
 
-       fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
+       fn update_gossip_backlogged(&self) {
+               let new_state = self.message_handler.route_handler.processing_queue_high();
+               let prev_state = self.gossip_processing_backlogged.swap(new_state, Ordering::Relaxed);
+               if prev_state && !new_state {
+                       self.gossip_processing_backlog_lifted.store(true, Ordering::Relaxed);
+               }
+       }
+
+       fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer, force_one_write: bool) {
+               let mut have_written = false;
                while !peer.awaiting_write_event {
                        if peer.should_buffer_onion_message() {
-                               if let Some(peer_node_id) = peer.their_node_id {
+                               if let Some((peer_node_id, _)) = peer.their_node_id {
                                        if let Some(next_onion_message) =
                                                self.message_handler.onion_message_handler.next_onion_message_for_peer(peer_node_id) {
                                                        self.enqueue_message(peer, &next_onion_message);
@@ -903,13 +1105,23 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                self.maybe_send_extra_ping(peer);
                        }
 
+                       let should_read = self.peer_should_read(peer);
                        let next_buff = match peer.pending_outbound_buffer.front() {
-                               None => return,
+                               None => {
+                                       if force_one_write && !have_written {
+                                               if should_read {
+                                                       let data_sent = descriptor.send_data(&[], should_read);
+                                                       debug_assert_eq!(data_sent, 0, "Can't write more than no data");
+                                               }
+                                       }
+                                       return
+                               },
                                Some(buff) => buff,
                        };
 
                        let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
-                       let data_sent = descriptor.send_data(pending, peer.should_read());
+                       let data_sent = descriptor.send_data(pending, should_read);
+                       have_written = true;
                        peer.pending_outbound_buffer_first_msg_offset += data_sent;
                        if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() {
                                peer.pending_outbound_buffer_first_msg_offset = 0;
@@ -927,7 +1139,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// May call [`send_data`] on the descriptor passed in (or an equal descriptor) before
        /// returning. Thus, be very careful with reentrancy issues! The invariants around calling
        /// [`write_buffer_space_avail`] in case a write did not fully complete must still hold - be
-       /// ready to call `[write_buffer_space_avail`] again if a write call generated here isn't
+       /// ready to call [`write_buffer_space_avail`] again if a write call generated here isn't
        /// sufficient!
        ///
        /// [`send_data`]: SocketDescriptor::send_data
@@ -939,12 +1151,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // This is most likely a simple race condition where the user found that the socket
                                // was writeable, then we told the user to `disconnect_socket()`, then they called
                                // this method. Return an error to make sure we get disconnected.
-                               return Err(PeerHandleError { no_connection_possible: false });
+                               return Err(PeerHandleError { });
                        },
                        Some(peer_mutex) => {
                                let mut peer = peer_mutex.lock().unwrap();
                                peer.awaiting_write_event = false;
-                               self.do_attempt_write_data(descriptor, &mut peer);
+                               self.do_attempt_write_data(descriptor, &mut peer, false);
                        }
                };
                Ok(())
@@ -962,14 +1174,17 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// [`send_data`] call on this descriptor has `resume_read` set (preventing DoS issues in the
        /// send buffer).
        ///
+       /// In order to avoid processing too many messages at once per peer, `data` should be on the
+       /// order of 4KiB.
+       ///
        /// [`send_data`]: SocketDescriptor::send_data
        /// [`process_events`]: PeerManager::process_events
        pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result<bool, PeerHandleError> {
                match self.do_read_event(peer_descriptor, data) {
                        Ok(res) => Ok(res),
                        Err(e) => {
-                               log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
-                               self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
+                               log_trace!(self.logger, "Disconnecting peer due to a protocol error (usually a duplicate connection).");
+                               self.disconnect_event_internal(peer_descriptor);
                                Err(e)
                        }
                }
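
A sketch of the read loop the new 4KiB guidance describes; `socket`, `descriptor`, and the error handling are assumptions:

    let mut buf = [0u8; 4096]; // keep each read_event call to roughly 4KiB of data
    loop {
        let len = match socket.read(&mut buf) {
            Ok(0) | Err(_) => break, // connection closed or errored
            Ok(len) => len,
        };
        match peer_manager.read_event(&mut descriptor, &buf[..len]) {
            // `true` asks us to pause reads until send_data is called with resume_read set.
            Ok(pause_read) => if pause_read { break },
            Err(_) => break, // PeerManager will make no further calls for this descriptor
        }
        peer_manager.process_events();
    }
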
@@ -978,9 +1193,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// Append a message to a peer's pending outbound/write buffer
        fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
                if is_gossip_msg(message.type_id()) {
-                       log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()));
+                       log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
                } else {
-                       log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()))
+                       log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0))
                }
                peer.msgs_sent_since_pong += 1;
                peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(message));
@@ -1002,7 +1217,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // This is most likely a simple race condition where the user read some bytes
                                // from the socket, then we told the user to `disconnect_socket()`, then they
                                // called this method. Return an error to make sure we get disconnected.
-                               return Err(PeerHandleError { no_connection_possible: false });
+                               return Err(PeerHandleError { });
                        },
                        Some(peer_mutex) => {
                                let mut read_pos = 0;
@@ -1016,7 +1231,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                msgs::ErrorAction::DisconnectPeer { msg: _ } => {
                                                                                        //TODO: Try to push msg
                                                                                        log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
-                                                                                       return Err(PeerHandleError{ no_connection_possible: false });
+                                                                                       return Err(PeerHandleError { });
                                                                                },
                                                                                msgs::ErrorAction::IgnoreAndLog(level) => {
                                                                                        log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
@@ -1065,14 +1280,18 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                                macro_rules! insert_node_id {
                                                        () => {
-                                                               match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap()) {
-                                                                       hash_map::Entry::Occupied(_) => {
-                                                                               log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap()));
+                                                               match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
+                                                                       hash_map::Entry::Occupied(e) => {
+                                                                               log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
-                                                                               return Err(PeerHandleError{ no_connection_possible: false })
+                                                                               // Check that the peers map is consistent with the
+                                                                               // node_id_to_descriptor map, as this has been broken
+                                                                               // before.
+                                                                               debug_assert!(peers.get(e.get()).is_some());
+                                                                               return Err(PeerHandleError { })
                                                                        },
                                                                        hash_map::Entry::Vacant(entry) => {
-                                                                               log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
+                                                                               log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                entry.insert(peer_descriptor.clone())
                                                                        },
                                                                };
@@ -1096,11 +1315,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
                                                                peer.pending_read_is_header = true;
 
-                                                               peer.their_node_id = Some(their_node_id);
+                                                               peer.set_their_node_id(their_node_id);
                                                                insert_node_id!();
-                                                               let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
-                                                                       .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
-                                                                       .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+                                                               let features = self.init_features(&their_node_id);
                                                                let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
@@ -1110,11 +1327,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
                                                                peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
                                                                peer.pending_read_is_header = true;
-                                                               peer.their_node_id = Some(their_node_id);
+                                                               peer.set_their_node_id(their_node_id);
                                                                insert_node_id!();
-                                                               let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
-                                                                       .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
-                                                                       .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+                                                               let features = self.init_features(&their_node_id);
                                                                let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
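Editor's note: `set_their_node_id` stores the peer's identity as a (PublicKey, NodeId) pair, so the serialized NodeId needed for gossip comparisons is computed once at handshake time instead of on every broadcast. A minimal sketch of that caching idea, with placeholder key types rather than the real secp256k1/LDK ones:

    // Sketch: cache the NodeId form next to the PublicKey when it is learned.
    #[derive(Clone, Copy, PartialEq)]
    struct PublicKey([u8; 33]);
    #[derive(Clone, Copy, PartialEq)]
    struct NodeId([u8; 33]);

    impl NodeId {
        fn from_pubkey(pk: &PublicKey) -> NodeId { NodeId(pk.0) }
    }

    struct Peer {
        their_node_id: Option<(PublicKey, NodeId)>,
    }

    impl Peer {
        fn set_their_node_id(&mut self, node_id: PublicKey) {
            // Compute the serialized form once instead of per gossip message.
            self.their_node_id = Some((node_id, NodeId::from_pubkey(&node_id)));
        }
    }

    fn main() {
        let mut peer = Peer { their_node_id: None };
        let pk = PublicKey([2; 33]);
        peer.set_their_node_id(pk);
        assert!(peer.their_node_id.map(|(p, _)| p) == Some(pk));
    }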
@@ -1126,7 +1341,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
                                                                        peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
                                                                        if msg_len < 2 { // Need at least the message type tag
-                                                                               return Err(PeerHandleError{ no_connection_possible: false });
+                                                                               return Err(PeerHandleError { });
                                                                        }
                                                                        peer.pending_read_is_header = false;
                                                                } else {
@@ -1140,7 +1355,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peer.pending_read_is_header = true;
 
                                                                        let mut reader = io::Cursor::new(&msg_data[..]);
-                                                                       let message_result = wire::read(&mut reader, &*self.custom_message_handler);
+                                                                       let message_result = wire::read(&mut reader, &*self.message_handler.custom_message_handler);
                                                                        let message = match message_result {
                                                                                Ok(x) => x,
                                                                                Err(e) => {
@@ -1169,19 +1384,19 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                (msgs::DecodeError::UnknownRequiredFeature, ty) => {
                                                                                                        log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
-                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                                       return Err(PeerHandleError { });
                                                                                                }
-                                                                                               (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+                                                                                               (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
                                                                                                (msgs::DecodeError::InvalidValue, _) => {
                                                                                                        log_debug!(self.logger, "Got an invalid value while deserializing message");
-                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                                       return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::ShortRead, _) => {
                                                                                                        log_debug!(self.logger, "Deserialization failed due to shortness of message");
-                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                                       return Err(PeerHandleError { });
                                                                                                }
-                                                                                               (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
-                                                                                               (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
+                                                                                               (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
+                                                                                               (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
                                                                                        }
                                                                                }
                                                                        };
@@ -1191,7 +1406,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        }
                                                }
                                        }
-                                       pause_read = !peer.should_read();
+                                       pause_read = !self.peer_should_read(peer);
 
                                        if let Some(message) = msg_to_handle {
                                                match self.handle_message(&peer_mutex, peer_lock, message) {
@@ -1212,7 +1427,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                }
 
                for msg in msgs_to_forward.drain(..) {
-                       self.forward_broadcast_msg(&*peers, &msg, peer_node_id.as_ref());
+                       self.forward_broadcast_msg(&*peers, &msg, peer_node_id.as_ref().map(|(pk, _)| pk));
                }
 
                Ok(pause_read)
@@ -1226,17 +1441,24 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                mut peer_lock: MutexGuard<Peer>,
                message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
        ) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
-               let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages");
+               let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
                peer_lock.received_message_since_timer_tick = true;
 
                // Need an Init as first message
                if let wire::Message::Init(msg) = message {
-                       if msg.features.requires_unknown_bits() {
-                               log_debug!(self.logger, "Peer features required unknown version bits");
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                       let our_features = self.init_features(&their_node_id);
+                       if msg.features.requires_unknown_bits_from(&our_features) {
+                               log_debug!(self.logger, "Peer requires features unknown to us");
+                               return Err(PeerHandleError { }.into());
+                       }
+
+                       if our_features.requires_unknown_bits_from(&msg.features) {
+                               log_debug!(self.logger, "We require features unknown to our peer");
+                               return Err(PeerHandleError { }.into());
                        }
+
                        if peer_lock.their_features.is_some() {
-                               return Err(PeerHandleError{ no_connection_possible: false }.into());
+                               return Err(PeerHandleError { }.into());
                        }
 
                        log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
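Editor's note: Init handling now checks feature compatibility in both directions, rejecting the connection if either side requires feature bits the other does not know. A toy, self-contained sketch of the `requires_unknown_bits_from` semantics under BOLT 9's even-required/odd-optional convention; the types here are illustrative, not the LDK feature types:

    // Sketch: a peer must be disconnected if it sets a required (even) bit
    // whose feature pair the other side does not recognize at all.
    #[derive(Clone)]
    struct FeatureBits(Vec<bool>); // index = feature bit position

    impl FeatureBits {
        fn knows(&self, bit: usize) -> bool {
            // A feature pair is "known" if either its required (even) or
            // optional (odd) flag is set on this side.
            let pair = bit / 2 * 2;
            *self.0.get(pair).unwrap_or(&false) || *self.0.get(pair + 1).unwrap_or(&false)
        }

        fn requires_unknown_bits_from(&self, other: &FeatureBits) -> bool {
            self.0.iter().enumerate()
                .any(|(bit, set)| *set && bit % 2 == 0 && !other.knows(bit))
        }
    }

    fn main() {
        let ours = FeatureBits(vec![true]);          // we require feature 0
        let theirs = FeatureBits(vec![false, true]); // they support feature 0 optionally
        let legacy = FeatureBits(vec![]);            // knows nothing

        assert!(!ours.requires_unknown_bits_from(&theirs)); // compatible
        assert!(ours.requires_unknown_bits_from(&legacy));  // must disconnect
        assert!(!legacy.requires_unknown_bits_from(&ours)); // legacy requires nothing
    }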
@@ -1246,24 +1468,24 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
                        }
 
-                       if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
-                       if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
-                       if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
 
                        peer_lock.their_features = Some(msg.features);
                        return Ok(None);
                } else if peer_lock.their_features.is_none() {
                        log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
-                       return Err(PeerHandleError{ no_connection_possible: false }.into());
+                       return Err(PeerHandleError { }.into());
                }
 
                if let wire::Message::GossipTimestampFilter(_msg) = message {
@@ -1277,6 +1499,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        return Ok(None);
                }
 
+               if let wire::Message::ChannelAnnouncement(ref _msg) = message {
+                       peer_lock.received_channel_announce_since_backlogged = true;
+               }
+
                mem::drop(peer_lock);
 
                if is_gossip_msg(message.type_id()) {
@@ -1311,7 +1537,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                }
                                self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
                                if msg.channel_id == [0; 32] {
-                                       return Err(PeerHandleError{ no_connection_possible: true }.into());
+                                       return Err(PeerHandleError { }.into());
                                }
                        },
                        wire::Message::Warning(msg) => {
@@ -1346,9 +1572,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        wire::Message::OpenChannel(msg) => {
                                self.message_handler.chan_handler.handle_open_channel(&their_node_id, &msg);
                        },
+                       wire::Message::OpenChannelV2(msg) => {
+                               self.message_handler.chan_handler.handle_open_channel_v2(&their_node_id, &msg);
+                       },
                        wire::Message::AcceptChannel(msg) => {
                                self.message_handler.chan_handler.handle_accept_channel(&their_node_id, &msg);
                        },
+                       wire::Message::AcceptChannelV2(msg) => {
+                               self.message_handler.chan_handler.handle_accept_channel_v2(&their_node_id, &msg);
+                       },
 
                        wire::Message::FundingCreated(msg) => {
                                self.message_handler.chan_handler.handle_funding_created(&their_node_id, &msg);
@@ -1360,6 +1592,35 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                self.message_handler.chan_handler.handle_channel_ready(&their_node_id, &msg);
                        },
 
+                       // Interactive transaction construction messages:
+                       wire::Message::TxAddInput(msg) => {
+                               self.message_handler.chan_handler.handle_tx_add_input(&their_node_id, &msg);
+                       },
+                       wire::Message::TxAddOutput(msg) => {
+                               self.message_handler.chan_handler.handle_tx_add_output(&their_node_id, &msg);
+                       },
+                       wire::Message::TxRemoveInput(msg) => {
+                               self.message_handler.chan_handler.handle_tx_remove_input(&their_node_id, &msg);
+                       },
+                       wire::Message::TxRemoveOutput(msg) => {
+                               self.message_handler.chan_handler.handle_tx_remove_output(&their_node_id, &msg);
+                       },
+                       wire::Message::TxComplete(msg) => {
+                               self.message_handler.chan_handler.handle_tx_complete(&their_node_id, &msg);
+                       },
+                       wire::Message::TxSignatures(msg) => {
+                               self.message_handler.chan_handler.handle_tx_signatures(&their_node_id, &msg);
+                       },
+                       wire::Message::TxInitRbf(msg) => {
+                               self.message_handler.chan_handler.handle_tx_init_rbf(&their_node_id, &msg);
+                       },
+                       wire::Message::TxAckRbf(msg) => {
+                               self.message_handler.chan_handler.handle_tx_ack_rbf(&their_node_id, &msg);
+                       },
+                       wire::Message::TxAbort(msg) => {
+                               self.message_handler.chan_handler.handle_tx_abort(&their_node_id, &msg);
+                       }
+
                        wire::Message::Shutdown(msg) => {
                                self.message_handler.chan_handler.handle_shutdown(&their_node_id, &msg);
                        },
@@ -1403,12 +1664,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                .map_err(|e| -> MessageHandlingError { e.into() })? {
                                        should_forward = Some(wire::Message::ChannelAnnouncement(msg));
                                }
+                               self.update_gossip_backlogged();
                        },
                        wire::Message::NodeAnnouncement(msg) => {
                                if self.message_handler.route_handler.handle_node_announcement(&msg)
                                                .map_err(|e| -> MessageHandlingError { e.into() })? {
                                        should_forward = Some(wire::Message::NodeAnnouncement(msg));
                                }
+                               self.update_gossip_backlogged();
                        },
                        wire::Message::ChannelUpdate(msg) => {
                                self.message_handler.chan_handler.handle_channel_update(&their_node_id, &msg);
@@ -1416,6 +1679,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                .map_err(|e| -> MessageHandlingError { e.into() })? {
                                        should_forward = Some(wire::Message::ChannelUpdate(msg));
                                }
+                               self.update_gossip_backlogged();
                        },
                        wire::Message::QueryShortChannelIds(msg) => {
                                self.message_handler.route_handler.handle_query_short_channel_ids(&their_node_id, msg)?;
@@ -1438,14 +1702,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        // Unknown messages:
                        wire::Message::Unknown(type_id) if message.is_even() => {
                                log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
-                               // Fail the channel if message is an even, unknown type as per BOLT #1.
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        },
                        wire::Message::Unknown(type_id) => {
                                log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
                        },
                        wire::Message::Custom(custom) => {
-                               self.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
+                               self.message_handler.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
                        },
                };
                Ok(should_forward)
@@ -1459,21 +1722,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+                                       if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
                                        }
+                                       debug_assert!(peer.their_node_id.is_some());
+                                       debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
-                                       if let Some(their_node_id) = peer.their_node_id {
-                                               let their_node_id = NodeId::from_pubkey(&their_node_id);
+                                       if let Some((_, their_node_id)) = peer.their_node_id {
                                                if their_node_id == msg.contents.node_id_1 || their_node_id == msg.contents.node_id_2 {
                                                        continue;
                                                }
                                        }
-                                       if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
+                                       if except_node.is_some() && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node {
                                                continue;
                                        }
                                        self.enqueue_encoded_gossip_broadcast(&mut *peer, encoded_msg.clone());
@@ -1485,20 +1749,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+                                       if !peer.handshake_complete() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
                                        }
+                                       debug_assert!(peer.their_node_id.is_some());
+                                       debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
-                                       if let Some(their_node_id) = peer.their_node_id {
-                                               if NodeId::from_pubkey(&their_node_id) == msg.contents.node_id {
+                                       if let Some((_, their_node_id)) = peer.their_node_id {
+                                               if their_node_id == msg.contents.node_id {
                                                        continue;
                                                }
                                        }
-                                       if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
+                                       if except_node.is_some() && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node {
                                                continue;
                                        }
                                        self.enqueue_encoded_gossip_broadcast(&mut *peer, encoded_msg.clone());
@@ -1510,15 +1776,17 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+                                       if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
                                        }
+                                       debug_assert!(peer.their_node_id.is_some());
+                                       debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
-                                       if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
+                                       if except_node.is_some() && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node {
                                                continue;
                                        }
                                        self.enqueue_encoded_gossip_broadcast(&mut *peer, encoded_msg.clone());
@@ -1567,6 +1835,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        }
                }
 
+               self.update_gossip_backlogged();
+               let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
+
                let mut peers_to_disconnect = HashMap::new();
                let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
                events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
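Editor's note: `update_gossip_backlogged` together with the consumed `gossip_processing_backlog_lifted` flag appears to act as a set-once/take-once latch, so reads paused during a gossip-processing backlog are flushed exactly once when the backlog clears. A self-contained sketch of that latch pattern, using placeholder field names rather than the PeerManager's own:

    // Sketch of the "backlog lifted" latch: set when the backlog clears,
    // consumed (swapped back to false) by the next processing pass.
    use std::sync::atomic::{AtomicBool, Ordering};

    struct GossipBacklog {
        backlogged: AtomicBool,
        backlog_lifted: AtomicBool,
    }

    impl GossipBacklog {
        fn update(&self, queue_high: bool) {
            let was_backlogged = self.backlogged.swap(queue_high, Ordering::Relaxed);
            if was_backlogged && !queue_high {
                // Backlog just cleared: remember to flush paused reads once.
                self.backlog_lifted.store(true, Ordering::Relaxed);
            }
        }
        fn take_lifted(&self) -> bool {
            // Consuming read: returns true at most once per lift.
            self.backlog_lifted.swap(false, Ordering::Relaxed)
        }
    }

    fn main() {
        let b = GossipBacklog { backlogged: AtomicBool::new(false), backlog_lifted: AtomicBool::new(false) };
        b.update(true);            // gossip queue fills up
        b.update(false);           // queue drains
        assert!(b.take_lifted());  // first pass sees the lift...
        assert!(!b.take_lifted()); // ...subsequent passes do not
    }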
@@ -1590,7 +1861,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        Some(descriptor) => match peers.get(&descriptor) {
                                                                Some(peer_mutex) => {
                                                                        let peer_lock = peer_mutex.lock().unwrap();
-                                                                       if peer_lock.their_features.is_none() {
+                                                                       if !peer_lock.handshake_complete() {
                                                                                continue;
                                                                        }
                                                                        peer_lock
@@ -1615,12 +1886,24 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                log_bytes!(msg.temporary_channel_id));
                                                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
+                                       MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.temporary_channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
                                        MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.temporary_channel_id));
                                                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
+                                       MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.temporary_channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
                                        MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                log_pubkey!(node_id),
@@ -1642,6 +1925,60 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                log_bytes!(msg.channel_id));
                                                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
+                                       MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
+                                       MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id),
+                                                               log_bytes!(msg.channel_id));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                       },
                                        MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
                                                                log_pubkey!(node_id),
@@ -1790,13 +2127,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                }
                        }
 
-                       for (node_id, msg) in self.custom_message_handler.get_and_clear_pending_msg() {
+                       for (node_id, msg) in self.message_handler.custom_message_handler.get_and_clear_pending_msg() {
                                if peers_to_disconnect.get(&node_id).is_some() { continue; }
                                self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
                        }
 
                        for (descriptor, peer_mutex) in peers.iter() {
-                               self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+                               let mut peer = peer_mutex.lock().unwrap();
+                               if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
+                               self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer, flush_read_disabled);
                        }
                }
                if !peers_to_disconnect.is_empty() {
@@ -1808,24 +2147,21 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // thread can be holding the peer lock if we have the global write
                                // lock).
 
-                               if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+                               let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+                               if let Some(mut descriptor) = descriptor_opt {
                                        if let Some(peer_mutex) = peers.remove(&descriptor) {
+                                               let mut peer = peer_mutex.lock().unwrap();
                                                if let Some(msg) = msg {
                                                        log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                        log_pubkey!(node_id),
                                                                        msg.data);
-                                                       let mut peer = peer_mutex.lock().unwrap();
                                                        self.enqueue_message(&mut *peer, &msg);
                                                        // This isn't guaranteed to work, but if there is enough free
                                                        // room in the send buffer, put the error message there...
-                                                       self.do_attempt_write_data(&mut descriptor, &mut *peer);
-                                               } else {
-                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
+                                                       self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
                                                }
-                                       }
-                                       descriptor.disconnect_socket();
-                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
+                                               self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
+                                       } else { debug_assert!(false, "Missing connection for peer"); }
                                }
                        }
                }
@@ -1833,10 +2169,26 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Indicates that the given socket descriptor's connection is now closed.
        pub fn socket_disconnected(&self, descriptor: &Descriptor) {
-               self.disconnect_event_internal(descriptor, false);
+               self.disconnect_event_internal(descriptor);
+       }
+
+       fn do_disconnect(&self, mut descriptor: Descriptor, peer: &Peer, reason: &'static str) {
+               if !peer.handshake_complete() {
+                       log_trace!(self.logger, "Disconnecting peer which hasn't completed handshake due to {}", reason);
+                       descriptor.disconnect_socket();
+                       return;
+               }
+
+               debug_assert!(peer.their_node_id.is_some());
+               if let Some((node_id, _)) = peer.their_node_id {
+                       log_trace!(self.logger, "Disconnecting peer with id {} due to {}", node_id, reason);
+                       self.message_handler.chan_handler.peer_disconnected(&node_id);
+                       self.message_handler.onion_message_handler.peer_disconnected(&node_id);
+               }
+               descriptor.disconnect_socket();
        }
 
-       fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
+       fn disconnect_event_internal(&self, descriptor: &Descriptor) {
                let mut peers = self.peers.write().unwrap();
                let peer_option = peers.remove(descriptor);
                match peer_option {
@@ -1847,13 +2199,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        },
                        Some(peer_lock) => {
                                let peer = peer_lock.lock().unwrap();
-                               if let Some(node_id) = peer.their_node_id {
-                                       log_trace!(self.logger,
-                                               "Handling disconnection of peer {}, with {}future connection to the peer possible.",
-                                               log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
-                                       self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
-                                       self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
-                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
+                               if let Some((node_id, _)) = peer.their_node_id {
+                                       log_trace!(self.logger, "Handling disconnection of peer {}", log_pubkey!(node_id));
+                                       let removed = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+                                       debug_assert!(removed.is_some(), "descriptor maps should be consistent");
+                                       if !peer.handshake_complete() { return; }
+                                       self.message_handler.chan_handler.peer_disconnected(&node_id);
+                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id);
                                }
                        }
                };
@@ -1861,21 +2213,17 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Disconnect a peer given its node id.
        ///
-       /// Set `no_connection_possible` to true to prevent any further connection with this peer,
-       /// force-closing any channels we have with it.
-       ///
        /// If a peer is connected, this will call [`disconnect_socket`] on the descriptor for the
        /// peer. Thus, be very careful about reentrancy issues.
        ///
        /// [`disconnect_socket`]: SocketDescriptor::disconnect_socket
-       pub fn disconnect_by_node_id(&self, node_id: PublicKey, no_connection_possible: bool) {
+       pub fn disconnect_by_node_id(&self, node_id: PublicKey) {
                let mut peers_lock = self.peers.write().unwrap();
-               if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
-                       log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
-                       peers_lock.remove(&descriptor);
-                       self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
-                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
-                       descriptor.disconnect_socket();
+               if let Some(descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+                       let peer_opt = peers_lock.remove(&descriptor);
+                       if let Some(peer_mutex) = peer_opt {
+                               self.do_disconnect(descriptor, &*peer_mutex.lock().unwrap(), "client request");
+                       } else { debug_assert!(false, "node_id_to_descriptor thought we had a peer"); }
                }
        }
 
@@ -1886,13 +2234,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                let mut peers_lock = self.peers.write().unwrap();
                self.node_id_to_descriptor.lock().unwrap().clear();
                let peers = &mut *peers_lock;
-               for (mut descriptor, peer) in peers.drain() {
-                       if let Some(node_id) = peer.lock().unwrap().their_node_id {
-                               log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
-                               self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                               self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
-                       }
-                       descriptor.disconnect_socket();
+               for (descriptor, peer_mutex) in peers.drain() {
+                       self.do_disconnect(descriptor, &*peer_mutex.lock().unwrap(), "client request to disconnect all peers");
                }
        }
 
@@ -1926,9 +2269,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                {
                        let peers_lock = self.peers.read().unwrap();
 
+                       self.update_gossip_backlogged();
+                       let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
+
                        for (descriptor, peer_mutex) in peers_lock.iter() {
                                let mut peer = peer_mutex.lock().unwrap();
-                               if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
+                               if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
+
+                               if !peer.handshake_complete() {
                                        // The peer needs to complete its handshake before we can exchange messages. We
                                        // give peers one timer tick to complete handshake, reusing
                                        // `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken
@@ -1940,56 +2288,56 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        }
                                        continue;
                                }
+                               debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                               debug_assert!(peer.their_node_id.is_some());
 
-                               if peer.awaiting_pong_timer_tick_intervals == -1 {
-                                       // Magic value set in `maybe_send_extra_ping`.
-                                       peer.awaiting_pong_timer_tick_intervals = 1;
+                               loop { // Used as a `goto` to skip writing a Ping message.
+                                       if peer.awaiting_pong_timer_tick_intervals == -1 {
+                                               // Magic value set in `maybe_send_extra_ping`.
+                                               peer.awaiting_pong_timer_tick_intervals = 1;
+                                               peer.received_message_since_timer_tick = false;
+                                               break;
+                                       }
+
+                                       if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
+                                               || peer.awaiting_pong_timer_tick_intervals as u64 >
+                                                       MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
+                                       {
+                                               descriptors_needing_disconnect.push(descriptor.clone());
+                                               break;
+                                       }
                                        peer.received_message_since_timer_tick = false;
-                                       continue;
-                               }
 
-                               if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
-                                       || peer.awaiting_pong_timer_tick_intervals as u64 >
-                                               MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
-                               {
-                                       descriptors_needing_disconnect.push(descriptor.clone());
-                                       continue;
-                               }
-                               peer.received_message_since_timer_tick = false;
+                                       if peer.awaiting_pong_timer_tick_intervals > 0 {
+                                               peer.awaiting_pong_timer_tick_intervals += 1;
+                                               break;
+                                       }
 
-                               if peer.awaiting_pong_timer_tick_intervals > 0 {
-                                       peer.awaiting_pong_timer_tick_intervals += 1;
-                                       continue;
+                                       peer.awaiting_pong_timer_tick_intervals = 1;
+                                       let ping = msgs::Ping {
+                                               ponglen: 0,
+                                               byteslen: 64,
+                                       };
+                                       self.enqueue_message(&mut *peer, &ping);
+                                       break;
                                }
-
-                               peer.awaiting_pong_timer_tick_intervals = 1;
-                               let ping = msgs::Ping {
-                                       ponglen: 0,
-                                       byteslen: 64,
-                               };
-                               self.enqueue_message(&mut *peer, &ping);
-                               self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer);
+                               self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer, flush_read_disabled);
                        }
                }
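Editor's note: the timer-tick body is rewritten so each early `continue` becomes a `break` out of a single-pass `loop`, letting the trailing `do_attempt_write_data` call run for every peer no matter which branch was taken. A minimal sketch of that forward-goto pattern, with illustrative names only:

    // Sketch: a loop that runs exactly once, where `break` skips straight to
    // the shared tail that must execute for every peer.
    fn process_peer(needs_ping: bool, ping_sent: &mut bool, bytes_written: &mut u32) {
        loop {
            if !needs_ping {
                break; // old code: `continue`, which would skip the tail below
            }
            *ping_sent = true;
            break;
        }
        // Common tail that must run whether or not a ping was queued.
        *bytes_written += 1;
    }

    fn main() {
        let (mut sent, mut written) = (false, 0);
        process_peer(false, &mut sent, &mut written);
        process_peer(true, &mut sent, &mut written);
        assert!(sent && written == 2);
    }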
 
                if !descriptors_needing_disconnect.is_empty() {
                        {
                                let mut peers_lock = self.peers.write().unwrap();
-                               for descriptor in descriptors_needing_disconnect.iter() {
-                                       if let Some(peer) = peers_lock.remove(descriptor) {
-                                               if let Some(node_id) = peer.lock().unwrap().their_node_id {
-                                                       log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
+                               for descriptor in descriptors_needing_disconnect {
+                                       if let Some(peer_mutex) = peers_lock.remove(&descriptor) {
+                                               let peer = peer_mutex.lock().unwrap();
+                                               if let Some((node_id, _)) = peer.their_node_id {
                                                        self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
-                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
                                                }
+                                               self.do_disconnect(descriptor, &*peer, "ping/handshake timeout");
                                        }
                                }
                        }
-
-                       for mut descriptor in descriptors_needing_disconnect.drain(..) {
-                               descriptor.disconnect_socket();
-                       }
                }
        }
 
@@ -2030,13 +2378,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                addresses.sort_by_key(|addr| addr.get_id());
 
                let features = self.message_handler.chan_handler.provided_node_features()
-                       .or(self.message_handler.route_handler.provided_node_features())
-                       .or(self.message_handler.onion_message_handler.provided_node_features());
+                       | self.message_handler.route_handler.provided_node_features()
+                       | self.message_handler.onion_message_handler.provided_node_features()
+                       | self.message_handler.custom_message_handler.provided_node_features();
                let announcement = msgs::UnsignedNodeAnnouncement {
                        features,
                        timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel),
                        node_id: NodeId::from_pubkey(&self.node_signer.get_node_id(Recipient::Node).unwrap()),
-                       rgb, alias, addresses,
+                       rgb,
+                       alias: NodeAlias(alias),
+                       addresses,
                        excess_address_data: Vec::new(),
                        excess_data: Vec::new(),
                };
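Editor's note: the raw 32-byte alias is now wrapped in `NodeAlias` when building the announcement. A hedged sketch of what such a newtype buys over a bare byte array, using a placeholder type whose helper method is illustrative rather than the real API:

    // Sketch: a dedicated alias type rather than a bare [u8; 32].
    struct NodeAlias([u8; 32]);

    impl NodeAlias {
        fn printable(&self) -> String {
            // A node alias is conventionally a NUL-padded UTF-8 string, so
            // take the prefix up to the first NUL.
            let end = self.0.iter().position(|&b| b == 0).unwrap_or(self.0.len());
            String::from_utf8_lossy(&self.0[..end]).into_owned()
        }
    }

    fn main() {
        let mut bytes = [0u8; 32];
        bytes[..5].copy_from_slice(b"alice");
        assert_eq!(NodeAlias(bytes).printable(), "alice");
    }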
@@ -2076,23 +2427,28 @@ fn is_gossip_msg(type_id: u16) -> bool {
 
 #[cfg(test)]
 mod tests {
-       use crate::chain::keysinterface::{NodeSigner, Recipient};
-       use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
+       use crate::sign::{NodeSigner, Recipient};
+       use crate::events;
+       use crate::io;
+       use crate::ln::features::{InitFeatures, NodeFeatures};
+       use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
+       use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
        use crate::ln::{msgs, wire};
-       use crate::ln::msgs::NetAddress;
-       use crate::util::events;
+       use crate::ln::msgs::{LightningError, NetAddress};
        use crate::util::test_utils;
 
-       use bitcoin::secp256k1::SecretKey;
+       use bitcoin::secp256k1::{PublicKey, SecretKey};
 
        use crate::prelude::*;
        use crate::sync::{Arc, Mutex};
-       use core::sync::atomic::Ordering;
+       use core::convert::Infallible;
+       use core::sync::atomic::{AtomicBool, Ordering};
 
        #[derive(Clone)]
        struct FileDescriptor {
                fd: u16,
                outbound_data: Arc<Mutex<Vec<u8>>>,
+               disconnect: Arc<AtomicBool>,
        }
        impl PartialEq for FileDescriptor {
                fn eq(&self, other: &Self) -> bool {
@@ -2112,25 +2468,57 @@ mod tests {
                        data.len()
                }
 
-               fn disconnect_socket(&mut self) {}
+               fn disconnect_socket(&mut self) { self.disconnect.store(true, Ordering::Release); }
        }
 
        struct PeerManagerCfg {
                chan_handler: test_utils::TestChannelMessageHandler,
                routing_handler: test_utils::TestRoutingMessageHandler,
+               custom_handler: TestCustomMessageHandler,
                logger: test_utils::TestLogger,
                node_signer: test_utils::TestNodeSigner,
        }
 
+       struct TestCustomMessageHandler {
+               features: InitFeatures,
+       }
+
+       impl wire::CustomMessageReader for TestCustomMessageHandler {
+               type CustomMessage = Infallible;
+               fn read<R: io::Read>(&self, _: u16, _: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
+                       Ok(None)
+               }
+       }
+
+       impl CustomMessageHandler for TestCustomMessageHandler {
+               fn handle_custom_message(&self, _: Infallible, _: &PublicKey) -> Result<(), LightningError> {
+                       unreachable!();
+               }
+
+               fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+               fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+
+               fn provided_init_features(&self, _: &PublicKey) -> InitFeatures {
+                       self.features.clone()
+               }
+       }
+
        fn create_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> {
                let mut cfgs = Vec::new();
                for i in 0..peer_count {
                        let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
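+                       // Have the custom handler advertise a feature bit (byte 32, bit 0) that no other handler sets.
+                       // Every peer advertises the same bit, so connections between these configs still succeed.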
+                       let features = {
+                               let mut feature_bits = vec![0u8; 33];
+                               feature_bits[32] = 0b00000001;
+                               InitFeatures::from_le_bytes(feature_bits)
+                       };
                        cfgs.push(
                                PeerManagerCfg{
                                        chan_handler: test_utils::TestChannelMessageHandler::new(),
                                        logger: test_utils::TestLogger::new(),
                                        routing_handler: test_utils::TestRoutingMessageHandler::new(),
+                                       custom_handler: TestCustomMessageHandler { features },
                                        node_signer: test_utils::TestNodeSigner::new(node_secret),
                                }
                        );
@@ -2139,24 +2527,59 @@ mod tests {
                cfgs
        }
 
-       fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>> {
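+       // Like `create_peermgr_cfgs`, but each config advertises a distinct required feature bit which no other
+       // peer knows, so handshakes against these configs should fail.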
+       fn create_incompatible_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> {
+               let mut cfgs = Vec::new();
+               for i in 0..peer_count {
+                       let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
+                       let features = {
+                               let mut feature_bits = vec![0u8; 33 + i + 1];
+                               feature_bits[33 + i] = 0b00000001;
+                               InitFeatures::from_le_bytes(feature_bits)
+                       };
+                       cfgs.push(
+                               PeerManagerCfg{
+                                       chan_handler: test_utils::TestChannelMessageHandler::new(),
+                                       logger: test_utils::TestLogger::new(),
+                                       routing_handler: test_utils::TestRoutingMessageHandler::new(),
+                                       custom_handler: TestCustomMessageHandler { features },
+                                       node_signer: test_utils::TestNodeSigner::new(node_secret),
+                               }
+                       );
+               }
+
+               cfgs
+       }
+
+       fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>> {
                let mut peers = Vec::new();
                for i in 0..peer_count {
                        let ephemeral_bytes = [i as u8; 32];
-                       let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, onion_message_handler: IgnoringMessageHandler {} };
-                       let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {}, &cfgs[i].node_signer);
+                       let msg_handler = MessageHandler {
+                               chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler,
+                               onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: &cfgs[i].custom_handler
+                       };
+                       let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, &cfgs[i].node_signer);
                        peers.push(peer);
                }
 
                peers
        }
 
-       fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
-               let a_id = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
-               let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
-               let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
-               let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
-               peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
+       fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
+               let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
+               let mut fd_a = FileDescriptor {
+                       fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                       disconnect: Arc::new(AtomicBool::new(false)),
+               };
+               let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+               let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
+               let mut fd_b = FileDescriptor {
+                       fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                       disconnect: Arc::new(AtomicBool::new(false)),
+               };
+               let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+               let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+               peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
                peer_a.process_events();
 
@@ -2171,27 +2594,142 @@ mod tests {
                let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
                assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
 
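+               // Both peers should now list each other, along with the address each connection was opened with.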
+               assert!(peer_a.get_peer_node_ids().contains(&(id_b, Some(addr_b))));
+               assert!(peer_b.get_peer_node_ids().contains(&(id_a, Some(addr_a))));
+
                (fd_a.clone(), fd_b.clone())
        }
 
+       #[test]
+       #[cfg(feature = "std")]
+       fn fuzz_threaded_connections() {
+               // Spawn two threads which repeatedly connect two peers together, leading to "got second
+               // connection with peer" disconnections and rapid reconnect. This previously found an issue
+               // with our internal map consistency, and is a generally good smoke test of disconnection.
+               let cfgs = Arc::new(create_peermgr_cfgs(2));
+               // Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+               let peers = Arc::new(create_network(2, unsafe { &*(&*cfgs as *const _) as &'static _ }));
+
+               let start_time = std::time::Instant::now();
+               macro_rules! spawn_thread { ($id: expr) => { {
+                       let peers = Arc::clone(&peers);
+                       let cfgs = Arc::clone(&cfgs);
+                       std::thread::spawn(move || {
+                               let mut ctr = 0;
+                               while start_time.elapsed() < std::time::Duration::from_secs(1) {
+                                       let id_a = peers[0].node_signer.get_node_id(Recipient::Node).unwrap();
+                                       let mut fd_a = FileDescriptor {
+                                               fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                                               disconnect: Arc::new(AtomicBool::new(false)),
+                                       };
+                                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                                       let mut fd_b = FileDescriptor {
+                                               fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                                               disconnect: Arc::new(AtomicBool::new(false)),
+                                       };
+                                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                                       let initial_data = peers[1].new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+                                       peers[0].new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
+                                       if peers[0].read_event(&mut fd_a, &initial_data).is_err() { break; }
+
+                                       while start_time.elapsed() < std::time::Duration::from_secs(1) {
+                                               peers[0].process_events();
+                                               if fd_a.disconnect.load(Ordering::Acquire) { break; }
+                                               let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+                                               if peers[1].read_event(&mut fd_b, &a_data).is_err() { break; }
+
+                                               peers[1].process_events();
+                                               if fd_b.disconnect.load(Ordering::Acquire) { break; }
+                                               let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+                                               if peers[0].read_event(&mut fd_a, &b_data).is_err() { break; }
+
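+                                               // Queue a Shutdown from each side so process_events always has a
+                                               // message to flush while the connection churns.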
+                                               cfgs[0].chan_handler.pending_events.lock().unwrap()
+                                                       .push(crate::events::MessageSendEvent::SendShutdown {
+                                                               node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(),
+                                                               msg: msgs::Shutdown {
+                                                                       channel_id: [0; 32],
+                                                                       scriptpubkey: bitcoin::Script::new(),
+                                                               },
+                                                       });
+                                               cfgs[1].chan_handler.pending_events.lock().unwrap()
+                                                       .push(crate::events::MessageSendEvent::SendShutdown {
+                                                               node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(),
+                                                               msg: msgs::Shutdown {
+                                                                       channel_id: [0; 32],
+                                                                       scriptpubkey: bitcoin::Script::new(),
+                                                               },
+                                                       });
+
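+                                               // Fire the timer on alternating iterations to also exercise ping and
+                                               // timeout handling.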
+                                               if ctr % 2 == 0 {
+                                                       peers[0].timer_tick_occurred();
+                                                       peers[1].timer_tick_occurred();
+                                               }
+                                       }
+
+                                       peers[0].socket_disconnected(&fd_a);
+                                       peers[1].socket_disconnected(&fd_b);
+                                       ctr += 1;
+                                       std::thread::sleep(std::time::Duration::from_micros(1));
+                               }
+                       })
+               } } }
+               let thrd_a = spawn_thread!(1);
+               let thrd_b = spawn_thread!(2);
+
+               thrd_a.join().unwrap();
+               thrd_b.join().unwrap();
+       }
+
+       #[test]
+       fn test_incompatible_peers() {
+               let cfgs = create_peermgr_cfgs(2);
+               let incompatible_cfgs = create_incompatible_peermgr_cfgs(2);
+
+               let peers = create_network(2, &cfgs);
+               let incompatible_peers = create_network(2, &incompatible_cfgs);
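+               // Pair each compatible peer with an incompatible one, so every handshake has exactly one side
+               // advertising an unknown required feature.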
+               let peer_pairs = [(&peers[0], &incompatible_peers[0]), (&incompatible_peers[1], &peers[1])];
+               for (peer_a, peer_b) in peer_pairs.iter() {
+                       let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
+                       let mut fd_a = FileDescriptor {
+                               fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                               disconnect: Arc::new(AtomicBool::new(false)),
+                       };
+                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                       let mut fd_b = FileDescriptor {
+                               fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                               disconnect: Arc::new(AtomicBool::new(false)),
+                       };
+                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                       let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+                       peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
+                       assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
+                       peer_a.process_events();
+
+                       let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+                       assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
+                       peer_b.process_events();
+                       let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+
+                       // Should fail because of unknown required features
+                       assert!(peer_a.read_event(&mut fd_a, &b_data).is_err());
+               }
+       }
+
        #[test]
        fn test_disconnect_peer() {
                // Simple test which builds a network of PeerManagers, connects and brings them to NoiseState::Finished,
                // then pushes a DisconnectPeer event to remove the node flagged by id
                let cfgs = create_peermgr_cfgs(2);
-               let chan_handler = test_utils::TestChannelMessageHandler::new();
-               let mut peers = create_network(2, &cfgs);
+               let peers = create_network(2, &cfgs);
                establish_connection(&peers[0], &peers[1]);
                assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
                let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap();
-
-               chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
+               cfgs[0].chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
                        node_id: their_id,
                        action: msgs::ErrorAction::DisconnectPeer { msg: None },
                });
-               assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1);
-               peers[0].message_handler.chan_handler = &chan_handler;
 
                peers[0].process_events();
                assert_eq!(peers[0].peers.read().unwrap().len(), 0);
@@ -2225,6 +2763,38 @@ mod tests {
                assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
        }
 
+       #[test]
+       fn test_non_init_first_msg() {
+               // Simple test of the first message received over a connection being something other than
+               // Init. This results in an immediate disconnection, which previously included a spurious
+               // peer_disconnected event handed to event handlers (which would panic in
+               // `TestChannelMessageHandler` here).
+               let cfgs = create_peermgr_cfgs(2);
+               let peers = create_network(2, &cfgs);
+
+               let mut fd_dup = FileDescriptor {
+                       fd: 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                       disconnect: Arc::new(AtomicBool::new(false)),
+               };
+               let addr_dup = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1003};
+               let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap();
+               peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap();
+
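+               // Drive the noise handshake by hand so the test controls the first message peers[0] decrypts.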
+               let mut dup_encryptor = PeerChannelEncryptor::new_outbound(id_a, SecretKey::from_slice(&[42; 32]).unwrap());
+               let initial_data = dup_encryptor.get_act_one(&peers[1].secp_ctx);
+               assert_eq!(peers[0].read_event(&mut fd_dup, &initial_data).unwrap(), false);
+               peers[0].process_events();
+
+               let a_data = fd_dup.outbound_data.lock().unwrap().split_off(0);
+               let (act_three, _) =
+                       dup_encryptor.process_act_two(&a_data[..], &&cfgs[1].node_signer).unwrap();
+               assert_eq!(peers[0].read_event(&mut fd_dup, &act_three).unwrap(), false);
+
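+               // The first message after the handshake must be Init; a Ping here should cause read_event to fail.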
+               let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 };
+               let msg_bytes = dup_encryptor.encrypt_message(&not_init_msg);
+               assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err());
+       }
+
        #[test]
        fn test_disconnect_all_peer() {
                // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
@@ -2309,8 +2879,14 @@ mod tests {
                let peers = create_network(2, &cfgs);
 
                let a_id = peers[0].node_signer.get_node_id(Recipient::Node).unwrap();
-               let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
-               let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+               let mut fd_a = FileDescriptor {
+                       fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                       disconnect: Arc::new(AtomicBool::new(false)),
+               };
+               let mut fd_b = FileDescriptor {
+                       fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                       disconnect: Arc::new(AtomicBool::new(false)),
+               };
                let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
                peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();