Merge pull request #1988 from TheBlueMatt/2023-01-limited-chans
author    Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
          Wed, 22 Feb 2023 00:39:13 +0000 (00:39 +0000)
committer GitHub <noreply@github.com>
          Wed, 22 Feb 2023 00:39:13 +0000 (00:39 +0000)
Limit the number of pending un-funded inbound channels

18 files changed:
fuzz/src/chanmon_consistency.rs
lightning-background-processor/src/lib.rs
lightning-net-tokio/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/onion_message/functional_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/routing/gossip.rs
lightning/src/util/test_utils.rs

fuzz/src/chanmon_consistency.rs
index 154f198c3286b0d297750f924650d177fa4617cd..06d03fd0e9b28d5cb10e71eb87b8a7c5a02257a7 100644 (file)
@@ -474,8 +474,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
        let mut channel_txn = Vec::new();
        macro_rules! make_channel {
                ($source: expr, $dest: expr, $chan_id: expr) => { {
-                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }).unwrap();
-                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }).unwrap();
+                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap();
+                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap();
 
                        $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
                        let open_channel = {
@@ -995,15 +995,15 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        },
                        0x0e => {
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_a_disconnected = false;
                                }
                        },
                        0x0f => {
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_b_disconnected = false;
                                }
                        },
@@ -1198,13 +1198,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                                // Next, make sure peers are all connected to each other
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_a_disconnected = false;
                                }
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_b_disconnected = false;
                                }
 
lightning-background-processor/src/lib.rs
index a21f00f867cb36c1dc4d5c596f29dc2af551c5de..97d0462eb5010980850c64b53dc37a07c512f476 100644 (file)
@@ -963,8 +963,8 @@ mod tests {
 
                for i in 0..num_nodes {
                        for j in (i+1)..num_nodes {
-                               nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }).unwrap();
-                               nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }).unwrap();
+                               nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
+                               nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
                        }
                }
 
lightning-net-tokio/src/lib.rs
index 58d3d0a8addb137e66a968f7a24e58d1accc710a..81e6157c656f6e39e76525e33ca09e1b55870325 100644 (file)
@@ -617,7 +617,7 @@ mod tests {
                fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
                fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
                fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
-               fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
+               fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
                fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
                fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
                fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -649,7 +649,7 @@ mod tests {
                                self.pubkey_disconnected.clone().try_send(()).unwrap();
                        }
                }
-               fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
+               fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> {
                        if *their_node_id == self.expected_pubkey {
                                self.pubkey_connected.clone().try_send(()).unwrap();
                        }
lightning/src/ln/chanmon_update_fail_tests.rs
index 6a626ee61d5a7ee30a1e56555499809086668bf6..abc4f4a140707cc4792b20131a96eb83d2d09e9d 100644 (file)
@@ -351,10 +351,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -373,10 +373,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -1130,8 +1130,8 @@ fn test_monitor_update_fail_reestablish() {
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1149,8 +1149,8 @@ fn test_monitor_update_fail_reestablish() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
        assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
@@ -1322,8 +1322,8 @@ fn claim_while_disconnected_monitor_update_fail() {
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1454,8 +1454,8 @@ fn monitor_failed_no_reestablish_response() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -2050,9 +2050,9 @@ fn test_pending_update_fee_ack_on_reconnect() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2178,9 +2178,9 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2314,10 +2314,10 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
                // Now reconnect the two
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
lightning/src/ln/channelmanager.rs
index ea5966c722e4a4d3df9ba5541a04111d446b6a87..45f45e138629d7dc6ddf2ed27b04d45e55e174ed 100644 (file)
@@ -593,6 +593,15 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C
 /// offline for a full minute. In order to track this, you must call
 /// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
 ///
+/// To avoid trivial DoS issues, ChannelManager limits the number of inbound connections and
+/// inbound channels without confirmed funding transactions. This may result in nodes which we do
+/// not have a channel with being unable to connect to us or open new channels with us if we have
+/// many peers with unfunded channels.
+///
+/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
+/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
+/// never limited. Please ensure you limit the count of such channels yourself.
+///
 /// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
 /// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
 /// essentially you should default to using a SimpleRefChannelManager, and use a
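
Since outbound channels and connections are never limited here, the burden of bounding them falls on the caller, as the new docs advise. A minimal caller-side sketch, assuming LDK's public `ChannelManager::list_channels()` / `ChannelDetails` API; `MAX_OUR_UNFUNDED_OUTBOUND` is a hypothetical application-chosen bound, not an LDK constant:

    use lightning::ln::channelmanager::ChannelDetails;

    /// Hypothetical application-chosen cap on our own unfunded outbound channels;
    /// ChannelManager deliberately does not enforce one.
    const MAX_OUR_UNFUNDED_OUTBOUND: usize = 16;

    /// Whether we should allow ourselves another outbound channel, given the output
    /// of `ChannelManager::list_channels()`. `!is_channel_ready` is used here as a
    /// rough stand-in for "funding not yet confirmed".
    fn may_open_outbound(channels: &[ChannelDetails]) -> bool {
        channels.iter()
            .filter(|c| c.is_outbound && !c.is_channel_ready)
            .count() < MAX_OUR_UNFUNDED_OUTBOUND
    }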
@@ -943,6 +952,19 @@ pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
 /// [`OutboundPayments::remove_stale_resolved_payments`].
 pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
 
+/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
+/// (inbound) ones. The number of peers with unfunded channels is limited separately in
+/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
+const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
+
+/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
+/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
+const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
+
+/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
+/// many peers we reject new (inbound) connections.
+const MAX_NO_CHANNEL_PEERS: usize = 250;
+
 /// Information needed for constructing an invoice route hint for this channel.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
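
Taken together, these defaults cap the damage: at most MAX_UNFUNDED_CHANNEL_PEERS peers may hold pending unfunded inbound channels, each holding at most MAX_UNFUNDED_CHANS_PER_PEER, for a worst case of 50 * 4 = 200 such channels, while up to MAX_NO_CHANNEL_PEERS (250) channel-less peers may remain connected. As illustrative const arithmetic (not part of the patch):

    // Illustrative only: worst-case pending unfunded inbound channels under the defaults.
    const MAX_PENDING_UNFUNDED_INBOUND: usize =
        MAX_UNFUNDED_CHANNEL_PEERS * MAX_UNFUNDED_CHANS_PER_PEER; // 50 * 4 = 200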
@@ -4274,11 +4296,13 @@ where
        fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
+               let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+               let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
                match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
                        hash_map::Entry::Occupied(mut channel) => {
                                if !channel.get().inbound_is_awaiting_accept() {
@@ -4296,6 +4320,21 @@ where
                                        peer_state.pending_msg_events.push(send_msg_err_event);
                                        let _ = remove_channel!(self, channel);
                                        return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                               } else {
+                                       // If this peer already has some channels, a new channel won't increase our number of peers
+                                       // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+                                       // channels per-peer we can accept channels from a peer with existing ones.
+                                       if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+                                               let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                                       node_id: channel.get().get_counterparty_node_id(),
+                                                       action: msgs::ErrorAction::SendErrorMessage{
+                                                               msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+                                                       }
+                                               };
+                                               peer_state.pending_msg_events.push(send_msg_err_event);
+                                               let _ = remove_channel!(self, channel);
+                                               return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+                                       }
                                }
 
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
@@ -4310,6 +4349,43 @@ where
                Ok(())
        }
 
+       /// Gets the number of peers which match the given filter and do not have any funded, outbound,
+       /// or 0-conf channels.
+       ///
+       /// The filter is called once per peer and is passed a reference to that peer's state;
+       /// peers for which it returns `false` are not counted.
+       fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
+       where Filter: Fn(&PeerState<<SP::Target as SignerProvider>::Signer>) -> bool {
+               let mut peers_without_funded_channels = 0;
+               let best_block_height = self.best_block.read().unwrap().height();
+               {
+                       let peer_state_lock = self.per_peer_state.read().unwrap();
+                       for (_, peer_mtx) in peer_state_lock.iter() {
+                               let peer = peer_mtx.lock().unwrap();
+                               if !maybe_count_peer(&*peer) { continue; }
+                               let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
+                               if num_unfunded_channels == peer.channel_by_id.len() {
+                                       peers_without_funded_channels += 1;
+                               }
+                       }
+               }
+               return peers_without_funded_channels;
+       }
+
+       fn unfunded_channel_count(
+               peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
+       ) -> usize {
+               let mut num_unfunded_channels = 0;
+               for (_, chan) in peer.channel_by_id.iter() {
+                       if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
+                               chan.get_funding_tx_confirmations(best_block_height) == 0
+                       {
+                               num_unfunded_channels += 1;
+                       }
+               }
+               num_unfunded_channels
+       }
+
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                if msg.chain_hash != self.genesis_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
@@ -4322,8 +4398,13 @@ where
                let mut random_bytes = [0u8; 16];
                random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
                let user_channel_id = u128::from_be_bytes(random_bytes);
-
                let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+
+               // Get the number of peers with channels, but without funded ones. We don't care too much
+               // about peers that never open a channel, so we filter by peers that have at least one
+               // channel, and then limit the number of those with unfunded channels.
+               let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
+
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                    .ok_or_else(|| {
@@ -4332,9 +4413,29 @@ where
                        })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+
+               // If this peer already has some channels, a new channel won't increase our number of peers
+               // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+               // channels per-peer we can accept channels from a peer with existing ones.
+               if peer_state.channel_by_id.is_empty() &&
+                       channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
+                       !self.default_configuration.manually_accept_inbound_channels
+               {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
+                               msg.temporary_channel_id.clone()));
+               }
+
+               let best_block_height = self.best_block.read().unwrap().height();
+               if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
+                               msg.temporary_channel_id.clone()));
+               }
+
                let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
-                       counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration,
-                       self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
+                       counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
+                       &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
                {
                        Err(e) => {
                                self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
@@ -6305,20 +6406,28 @@ where
                }
        }
 
-       fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
                if !init_msg.features.supports_static_remote_key() {
                        log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
                        return Err(());
                }
 
-               log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
-
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
+               // If we have too many peers connected which don't have funded channels, disconnect the
+               // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+               // unfunded channels taking up space in memory for disconnected peers, we still let new
+               // peers connect, but we'll reject new channels from them.
+               let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+               let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+
                {
                        let mut peer_state_lock = self.per_peer_state.write().unwrap();
                        match peer_state_lock.entry(counterparty_node_id.clone()) {
                                hash_map::Entry::Vacant(e) => {
+                                       if inbound_peer_limited {
+                                               return Err(());
+                                       }
                                        e.insert(Mutex::new(PeerState {
                                                channel_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
@@ -6329,14 +6438,24 @@ where
                                hash_map::Entry::Occupied(e) => {
                                        let mut peer_state = e.get().lock().unwrap();
                                        peer_state.latest_features = init_msg.features.clone();
+
+                                       let best_block_height = self.best_block.read().unwrap().height();
+                                       if inbound_peer_limited &&
+                                               Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+                                               peer_state.channel_by_id.len()
+                                       {
+                                               return Err(());
+                                       }
+
                                        debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
                                        peer_state.is_connected = true;
                                },
                        }
                }
 
-               let per_peer_state = self.per_peer_state.read().unwrap();
+               log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
+               let per_peer_state = self.per_peer_state.read().unwrap();
                for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
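
The gating above reduces to a single predicate: an inbound connection is refused only when we are at the no-channel-peer limit and every channel we have with the (re)connecting peer, possibly zero, is unfunded. A standalone restatement with illustrative names (not LDK API):

    // Standalone restatement of the connection-gating policy above (illustrative,
    // not LDK API). MAX_NO_CHANNEL_PEERS mirrors the constant introduced earlier.
    const MAX_NO_CHANNEL_PEERS: usize = 250;

    fn should_reject_inbound_connection(
        inbound: bool,
        connected_peers_without_funded_channels: usize,
        peer_channel_count: usize,
        peer_unfunded_channel_count: usize,
    ) -> bool {
        let limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
        // A peer with zero channels trivially has all of its channels unfunded, so a
        // brand-new inbound peer is rejected once the limit is hit; a peer with at
        // least one funded channel is exempt.
        limited && peer_unfunded_channel_count == peer_channel_count
    }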
@@ -8407,6 +8526,213 @@ mod tests {
                check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
        }
 
+       #[test]
+       fn test_connection_limiting() {
+               // Test that we limit un-channel'd peers and un-funded channels properly.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Note that create_network connects the nodes together for us
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+               let mut funding_tx = None;
+               for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+                       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+                       let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+                       if idx == 0 {
+                               nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+                               let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+                               funding_tx = Some(tx.clone());
+                               nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
+                               let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+                               nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+                               check_added_monitors!(nodes[1], 1);
+                               let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+                               nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+                               check_added_monitors!(nodes[0], 1);
+                       }
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+
+               // A (MAX_UNFUNDED_CHANS_PER_PEER + 1)th channel will be summarily rejected
+               open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // Further, because all of our channels with nodes[0] are inbound, and none of them funded,
+               // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
+               // limit.
+               let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
+               for _ in 1..super::MAX_NO_CHANNEL_PEERS {
+                       let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                               &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+                       peer_pks.push(random_pk);
+                       nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               }
+               let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                       &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+               nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+               // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
+               // them if we have too many un-channel'd peers.
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
+               for ev in chan_closed_events {
+                       if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
+               }
+               nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+               // but of course if the connection is outbound it's allowed...
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+               // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
+               // Even though we accept one more connection from new peers, we won't actually let them
+               // open channels.
+               assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
+               for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+                       nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
+                       get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // Of course, however, outbound channels are always allowed
+               nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
+               get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
+
+               // If we fund the first channel, nodes[0] has a live on-chain channel with us; it is
+               // now "protected" and can connect again.
+               mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+               // Further, because the first channel was funded, we can open another channel with
+               // last_random_pk.
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+       }
+
+       #[test]
+       fn test_outbound_chans_unlimited() {
+               // Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Note that create_network connects the nodes together for us
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+               for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+                       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+                       get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+
+               // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
+               // rejected.
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // but we can still open an outbound channel.
+               nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+
+               // but even with such an outbound channel, additional inbound channels will still fail.
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+                       open_channel_msg.temporary_channel_id);
+       }
+
+       #[test]
+       fn test_0conf_limiting() {
+               // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
+               // flag set and (sometimes) accept channels as 0conf.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let mut settings = test_default_channel_config();
+               settings.manually_accept_inbound_channels = true;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Note that create_network connects the nodes together for us
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+               // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
+               for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+                       let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                               &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+                       nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+
+                       nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       match events[0] {
+                               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                                       nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
+                               }
+                               _ => panic!("Unexpected event"),
+                       }
+                       get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+
+               // If we try to accept a channel non-0conf from one more peer, it will fail.
+               let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                       &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+               nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                               match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
+                                       Err(APIError::APIMisuseError { err }) =>
+                                               assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
+                                       _ => panic!(),
+                               }
+                       }
+                       _ => panic!("Unexpected event"),
+               }
+               assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // ...however if we accept the same channel 0conf it should work just fine.
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                               nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
+                       }
+                       _ => panic!("Unexpected event"),
+               }
+               get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+       }
+
        #[cfg(anchors)]
        #[test]
        fn test_anchors_zero_fee_htlc_tx_fallback() {
@@ -8517,8 +8843,8 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap();
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap();
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
lightning/src/ln/functional_test_utils.rs
index 0c21588fb717af4aee2ef93a52f9da16abaaaaa3..fd867bc6ab7e07287c0572ae1cdb577e9e290ab8 100644 (file)
@@ -2374,8 +2374,8 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
 
        for i in 0..node_count {
                for j in (i+1)..node_count {
-                       nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()), remote_network_address: None }).unwrap();
-                       nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()), remote_network_address: None }).unwrap();
+                       nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()), remote_network_address: None }, true).unwrap();
+                       nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()), remote_network_address: None }, false).unwrap();
                }
        }
 
@@ -2658,9 +2658,9 @@ macro_rules! handle_chan_reestablish_msgs {
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
 pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
-       node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }).unwrap();
+       node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
-       node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }).unwrap();
+       node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
 
        if send_channel_ready.0 {
lightning/src/ln/functional_tests.rs
index 8a5a77b90c373ccb6ff71019b15e8bfda1e45b8c..65f27da13559ac1d4c067aafc694540894acdc07 100644 (file)
@@ -3979,10 +3979,10 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
 
@@ -6294,10 +6294,10 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        //Disconnect and Reconnect
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
@@ -7053,10 +7053,10 @@ fn test_announce_disable_channels() {
                }
        }
        // Reconnect peers
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 3);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 3);
 
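Every reconnection site in the tests above now passes the new direction flag explicitly. By the convention used throughout this suite, the node whose `peer_connected` runs first treats the connection as inbound (`true`) and its counterparty as outbound (`false`). A hypothetical helper capturing the repeated pattern (illustrative only; the tests inline this rather than defining such a function, and `reconnect_pair` is not part of this patch):

    // Sketch under the functional-test conventions: `Node` is the test
    // harness type; `reconnect_pair` is a hypothetical name for this pattern.
    fn reconnect_pair(a: &Node<'_, '_, '_>, b: &Node<'_, '_, '_>) {
        a.node.peer_connected(&b.node.get_our_node_id(),
            &msgs::Init { features: b.node.init_features(), remote_network_address: None }, true).unwrap();
        b.node.peer_connected(&a.node.get_our_node_id(),
            &msgs::Init { features: a.node.init_features(), remote_network_address: None }, false).unwrap();
    }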
index d43dff6e1d3b18b9fe895ef295390730ffc59dfa..6e49a46f08a02a25065652e28cf3895afc031fe8 100644 (file)
@@ -1001,7 +1001,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
        /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
        /// with us. Implementors should be somewhat conservative about doing so, however, as other
        /// message handlers may still wish to communicate with this peer.
-       fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init) -> Result<(), ()>;
+       fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>;
        /// Handle an incoming `channel_reestablish` message from the given peer.
        fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish);
 
@@ -1059,7 +1059,7 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
        /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
        /// with us. Implementors should be somewhat conservative about doing so, however, as other
        /// message handlers may still wish to communicate with this peer.
-       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>;
        /// Handles the reply of a query we initiated to learn about channels
        /// for a given range of blocks. We can expect to receive one or more
        /// replies to a single query.
@@ -1106,7 +1106,7 @@ pub trait OnionMessageHandler : OnionMessageProvider {
        /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
        /// with us. Implementors should be somewhat conservative about doing so, however, as other
        /// message handlers may still wish to communicate with this peer.
-       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>;
        /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
        /// drop and refuse to forward onion messages to this peer.
        fn peer_disconnected(&self, their_node_id: &PublicKey);
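All three handler traits gain the same trailing `inbound` parameter, letting an implementor distinguish connections the peer initiated from ones we dialed out. A minimal sketch of how a custom handler might use it, assuming a hypothetical `DirectionAwareHandler` type (none of these names come from this patch; only the method signature mirrors the traits above):

    use bitcoin::secp256k1::PublicKey;
    use lightning::ln::msgs::Init;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Hypothetical implementor: caps how many inbound peers it will serve.
    struct DirectionAwareHandler {
        inbound_peers: AtomicUsize,
        max_inbound_peers: usize,
    }

    impl DirectionAwareHandler {
        // Mirrors the updated `peer_connected` signature. Returning Err(())
        // asks the PeerManager to disconnect the peer; per the docs above,
        // implementors should be conservative about doing so.
        fn peer_connected(&self, _their_node_id: &PublicKey, _init: &Init, inbound: bool)
        -> Result<(), ()> {
            if inbound && self.inbound_peers.fetch_add(1, Ordering::AcqRel) >= self.max_inbound_peers {
                self.inbound_peers.fetch_sub(1, Ordering::AcqRel);
                return Err(());
            }
            Ok(())
        }
    }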
index 9a957e37203799014c6853b6c9ffad93f360e37d..249051c40ba337620aad601d7b09e232dab34b79 100644 (file)
@@ -341,12 +341,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -505,12 +505,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        assert!(nodes[0].node.has_pending_payments());
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
index 814bf304d8d4eb0e6aebd67f08ac9b7015b931ae..e9eaf33e8840b021afe7fbcfc60afec3e0fa5b3d 100644 (file)
@@ -79,7 +79,7 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
        fn get_next_channel_announcement(&self, _starting_point: u64) ->
                Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
        fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<msgs::NodeAnnouncement> { None }
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
        fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
        fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -95,7 +95,7 @@ impl OnionMessageProvider for IgnoringMessageHandler {
 }
 impl OnionMessageHandler for IgnoringMessageHandler {
        fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
@@ -231,7 +231,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        // msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
        fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
        fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
@@ -425,6 +425,8 @@ struct Peer {
        /// `channel_announcement` at all - we set this unconditionally but unset it every time we
        /// check if we're gossip-processing-backlogged).
        received_channel_announce_since_backlogged: bool,
+
+       inbound_connection: bool,
 }
 
 impl Peer {
@@ -836,6 +838,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        sent_gossip_timestamp_filter: false,
 
                        received_channel_announce_since_backlogged: false,
+                       inbound_connection: false,
                })).is_some() {
                        panic!("PeerManager driver duplicated descriptors!");
                };
@@ -885,6 +888,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        sent_gossip_timestamp_filter: false,
 
                        received_channel_announce_since_backlogged: false,
+                       inbound_connection: true,
                })).is_some() {
                        panic!("PeerManager driver duplicated descriptors!");
                };
@@ -1317,15 +1321,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
                        }
 
-                       if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
-                       if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
-                       if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
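Inside `PeerManager` the direction is captured once, at socket setup, and simply replayed when the peer's `Init` arrives: the `inbound_connection` field defaults to `false` in the first constructor hunk and `true` in the second (matching the outbound and inbound connection paths respectively), then flows to all three handlers above. A network driver therefore picks the flag implicitly by which entry point it calls; a sketch, assuming `peer_manager`, `their_node_id`, `descriptor`, and `remote_network_address` are already in scope:

    // Socket we dialed out: handlers will later see inbound == false.
    // new_outbound_connection returns the initial handshake bytes to write.
    let initial_bytes = peer_manager.new_outbound_connection(
        their_node_id, descriptor.clone(), remote_network_address.clone())?;

    // Socket a peer opened to us: handlers will see inbound == true.
    peer_manager.new_inbound_connection(descriptor, remote_network_address)?;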
index b47380dfd6379a9f6b43710c3f725d9645940a50..7636f5c63641edc5e18cb5d7f69d3375a9fa235b 100644 (file)
@@ -100,8 +100,8 @@ fn test_priv_forwarding_rejection() {
        no_announce_cfg.accept_forwards_to_priv_channels = true;
        reload_node!(nodes[1], no_announce_cfg, &nodes_1_serialized, &[&monitor_a_serialized, &monitor_b_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
@@ -109,8 +109,8 @@ fn test_priv_forwarding_rejection() {
        get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 
-       nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[2]).pop().unwrap();
        let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap();
        nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
index 6643d34ca17005e8bdeb62a6a52a27f287245903..14deefd3f93828181f6cfad952d48913c8e25586 100644 (file)
@@ -60,9 +60,9 @@ fn test_funding_peer_disconnect() {
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert!(events_2.is_empty());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
@@ -196,9 +196,9 @@ fn test_no_txn_manager_serialize_deserialize() {
                get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -278,9 +278,9 @@ fn test_manager_serialize_deserialize_events() {
        // Make sure the channel is functioning as though the de/serialization never happened
        assert_eq!(nodes[0].node.list_channels().len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -443,9 +443,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        //... and we can even still claim the payment!
        claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
-       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
-       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
        let mut found_err = false;
        for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
@@ -494,8 +494,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
 
        if reconnect_panicing {
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
@@ -543,8 +543,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        // after the warning message sent by B, we should not be able to
        // use the channel, or successfully reconnect to it.
        assert!(nodes[0].node.list_usable_channels().is_empty());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
@@ -779,9 +779,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        if !persist_both_monitors {
                // If one of the two channels is still live, reveal the payment preimage over it.
 
-               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
-               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
 
                nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
index ce1f3b64750bafbd7343ff66f47e8b16677d30a4..69e0c9afa9e49c056c7d081ef8e42913d7492d55 100644 (file)
@@ -378,7 +378,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                // If we dropped the channel before reloading the node, nodes[1] was also dropped from
                // nodes[0] storage, and hence not connected again on startup. We therefore need to
                // reconnect to the node before attempting to create a new channel.
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        }
        create_announced_chan_between_nodes(&nodes, 0, 1);
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
index 59c9911df946753139240e22c6e0534eb9ab04d8..75eacb74bc2fe7352a99288e28fcef82eac4c81d 100644 (file)
@@ -246,9 +246,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let node_1_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
@@ -308,9 +308,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
        let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, false).unwrap();
        if recv_count == 0 {
                // If all closing_signeds weren't delivered we can just resume where we left off...
                let node_0_2nd_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
index 1309baf60eac7ed85a5493ec8ca102653cad6eb6..aee7f562defc5915517d696cf032353deee4e3d6 100644 (file)
@@ -85,8 +85,8 @@ fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
                let mut features = InitFeatures::empty();
                features.set_onion_messages_optional();
                let init_msg = msgs::Init { features, remote_network_address: None };
-               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone()).unwrap();
-               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone()).unwrap();
+               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone(), true).unwrap();
+               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone(), false).unwrap();
        }
        nodes
 }
index 2c71e9c3fb34e2d15d13933dbf0e6b37a17f570a..7814ccb6508c77a10abfa32ee189914493a62278 100644 (file)
@@ -417,7 +417,7 @@ impl<ES: Deref, NS: Deref, L: Deref, CMH: Deref> OnionMessageHandler for OnionMe
                };
        }
 
-       fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                if init.features.supports_onion_messages() {
                        let mut peers = self.pending_messages.lock().unwrap();
                        peers.insert(their_node_id.clone(), VecDeque::new());
index 82d2cd4cb981fa4e8d532fabdbc8c0b60ad62897..cf51b6ab52827943626ede2cbc5d0e87e1f95366 100644 (file)
@@ -437,7 +437,7 @@ where U::Target: UtxoLookup, L::Target: Logger
        /// to request gossip messages for each channel. The sync is considered complete
        /// when the final reply_scids_end message is received, though we are not
        /// tracking this directly.
-       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> {
                // We will only perform a sync with peers that support gossip_queries.
                if !init_msg.features.supports_gossip_queries() {
                        // Don't disconnect peers for not supporting gossip queries. We may wish to have
@@ -2791,7 +2791,7 @@ pub(crate) mod tests {
                // The peer should be ignored if the gossip_queries feature is not enabled
                {
                        let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None };
-                       gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap();
+                       gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap();
                        let events = gossip_sync.get_and_clear_pending_msg_events();
                        assert_eq!(events.len(), 0);
                }
@@ -2801,7 +2801,7 @@ pub(crate) mod tests {
                        let mut features = InitFeatures::empty();
                        features.set_gossip_queries_optional();
                        let init_msg = Init { features, remote_network_address: None };
-                       gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap();
+                       gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap();
                        let events = gossip_sync.get_and_clear_pending_msg_events();
                        assert_eq!(events.len(), 1);
                        match &events[0] {
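As the `_inbound` underscore signals, the gossip sync path ignores connection direction entirely; whether to start a sync keys only off the peer's advertised features, which the two test blocks above exercise from both sides. The same tolerate-rather-than-disconnect shape appears in `TestRoutingMessageHandler` below; a condensed sketch (the elided body is where the real handler queues its initial gossip queries):

    fn peer_connected(&self, _their_node_id: &PublicKey, init_msg: &Init, _inbound: bool)
    -> Result<(), ()> {
        if !init_msg.features.supports_gossip_queries() {
            // Skip the sync but don't disconnect: other handlers may still
            // want to communicate with this peer.
            return Ok(());
        }
        // ...enqueue the initial gossip query messages here...
        Ok(())
    }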
index 8dc98ee43d7cb3ef384b86a349f9f2d10ea4f714..344f5b4c62d01736afd85654064735c68f0456bd 100644 (file)
@@ -427,7 +427,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
        fn peer_disconnected(&self, their_node_id: &PublicKey) {
                assert!(self.connected_peers.lock().unwrap().remove(their_node_id));
        }
-       fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                assert!(self.connected_peers.lock().unwrap().insert(their_node_id.clone()));
                // Don't bother with `received_msg` for Init as it's auto-generated and we don't want to
                // bother re-generating the expected Init message in all tests.
@@ -544,7 +544,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                None
        }
 
-       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                if !init_msg.features.supports_gossip_queries() {
                        return Ok(());
                }