From 4155f54716f6bc8632d2a501d22c51a2545670b1 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 26 Jan 2023 04:45:58 +0000
Subject: [PATCH] Add an `inbound` flag to the `peer_connected` message handlers

It's useful for the message handlers to know whether a peer is inbound when
making DoS-related decisions.
---
 fuzz/src/chanmon_consistency.rs | 20 +++++------
 lightning-background-processor/src/lib.rs | 4 +--
 lightning-net-tokio/src/lib.rs | 4 +--
 lightning/src/ln/chanmon_update_fail_tests.rs | 36 +++++++++----------
 lightning/src/ln/channelmanager.rs | 6 ++--
 lightning/src/ln/functional_test_utils.rs | 8 ++---
 lightning/src/ln/functional_tests.rs | 12 +++----
 lightning/src/ln/msgs.rs | 6 ++--
 lightning/src/ln/payment_tests.rs | 8 ++---
 lightning/src/ln/peer_handler.rs | 16 +++++----
 lightning/src/ln/priv_short_conf_tests.rs | 8 ++---
 lightning/src/ln/reload_tests.rs | 28 +++++++--------
 lightning/src/ln/reorg_tests.rs | 2 +-
 lightning/src/ln/shutdown_tests.rs | 8 ++---
 .../src/onion_message/functional_tests.rs | 4 +--
 lightning/src/onion_message/messenger.rs | 2 +-
 lightning/src/routing/gossip.rs | 6 ++--
 lightning/src/util/test_utils.rs | 4 +--
 18 files changed, 93 insertions(+), 89 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 154f198c..06d03fd0 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -474,8 +474,8 @@ pub fn do_test(data: &[u8], underlying_out: Out) { let mut channel_txn = Vec::new(); macro_rules! make_channel { ($source: expr, $dest: expr, $chan_id: expr) => { { - $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }).unwrap(); - $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }).unwrap(); + $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap(); + $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap(); $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap(); let open_channel = { @@ -995,15 +995,15 @@ pub fn do_test(data: &[u8], underlying_out: Out) { }, 0x0e => { if chan_a_disconnected { - nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap(); - nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap(); + nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap(); + nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap(); chan_a_disconnected = false; } }, 0x0f => { if chan_b_disconnected { - nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap(); - nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap(); + nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap(); + nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None
}, false).unwrap(); chan_b_disconnected = false; } }, @@ -1198,13 +1198,13 @@ pub fn do_test(data: &[u8], underlying_out: Out) { // Next, make sure peers are all connected to each other if chan_a_disconnected { - nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap(); - nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap(); + nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap(); + nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap(); chan_a_disconnected = false; } if chan_b_disconnected { - nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap(); - nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap(); + nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap(); + nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap(); chan_b_disconnected = false; } diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index a21f00f8..97d0462e 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -963,8 +963,8 @@ mod tests { for i in 0..num_nodes { for j in (i+1)..num_nodes { - nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }).unwrap(); - nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }).unwrap(); + nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap(); } } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 58d3d0a8..81e6157c 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -617,7 +617,7 @@ mod tests { fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result { Ok(false) } fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option, Option)> { None } fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option { None } - fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) } + fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) } fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) } fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) } fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) } @@ -649,7 +649,7 @@ 
mod tests { self.pubkey_disconnected.clone().try_send(()).unwrap(); } } - fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { + fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { if *their_node_id == self.expected_pubkey { self.pubkey_connected.clone().try_send(()).unwrap(); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 6a626ee6..abc4f4a1 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -351,10 +351,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -373,10 +373,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -1130,8 +1130,8 @@ fn test_monitor_update_fail_reestablish() { commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + 
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -1149,8 +1149,8 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish); assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish); @@ -1322,8 +1322,8 @@ fn claim_while_disconnected_monitor_update_fail() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -1454,8 +1454,8 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -2050,9 +2050,9 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); 
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); @@ -2178,9 +2178,9 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); @@ -2314,10 +2314,10 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); // Now reconnect the two - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ea5966c7..285df109 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6305,7 +6305,7 @@ where } } - fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> { + fn 
peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> { if !init_msg.features.supports_static_remote_key() { log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); return Err(()); @@ -8517,8 +8517,8 @@ pub mod bench { }); let node_b_holder = NodeHolder { node: &node_b }; - node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap(); - node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap(); + node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap(); + node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap(); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap(); node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 0c21588f..fd867bc6 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2374,8 +2374,8 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) { - node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }).unwrap(); + node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); - node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }).unwrap(); + node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); if send_channel_ready.0 { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8a5a77b9..65f27da1 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3979,10 +3979,10 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = 
get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -6294,10 +6294,10 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { //Disconnect and Reconnect nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); @@ -7053,10 +7053,10 @@ fn test_announce_disable_channels() { } } // Reconnect peers - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 3); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 3); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index d43dff6e..6e49a46f 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -1001,7 +1001,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. - fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init) -> Result<(), ()>; + fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; /// Handle an incoming `channel_reestablish` message from the given peer. 
fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish); @@ -1059,7 +1059,7 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. - fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>; + fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>; /// Handles the reply of a query we initiated to learn about channels /// for a given range of blocks. We can expect to receive one or more /// replies to a single query. @@ -1106,7 +1106,7 @@ pub trait OnionMessageHandler : OnionMessageProvider { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. - fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>; + fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>; /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to /// drop and refuse to forward onion messages to this peer. fn peer_disconnected(&self, their_node_id: &PublicKey); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9a957e37..249051c4 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -341,12 +341,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { assert_eq!(as_broadcasted_txn[0], as_commitment_tx); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. 
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); @@ -505,12 +505,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert!(nodes[0].node.has_pending_payments()); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 814bf304..e9eaf33e 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -79,7 +79,7 @@ impl RoutingMessageHandler for IgnoringMessageHandler { fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(msgs::ChannelAnnouncement, Option, Option)> { None } fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option { None } - fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) } + fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) } fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) } fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) } @@ -95,7 +95,7 @@ impl OnionMessageProvider for IgnoringMessageHandler { } impl OnionMessageHandler for IgnoringMessageHandler { fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {} - fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) } + fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } fn peer_disconnected(&self, _their_node_id: &PublicKey) {} fn 
provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { @@ -231,7 +231,7 @@ impl ChannelMessageHandler for ErroringMessageHandler { // msgs::ChannelUpdate does not contain the channel_id field, so we just drop them. fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {} fn peer_disconnected(&self, _their_node_id: &PublicKey) {} - fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) } + fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {} fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { @@ -425,6 +425,8 @@ struct Peer { /// `channel_announcement` at all - we set this unconditionally but unset it every time we /// check if we're gossip-processing-backlogged). received_channel_announce_since_backlogged: bool, + + inbound_connection: bool, } impl Peer { @@ -836,6 +838,7 @@ impl Vec { let mut features = InitFeatures::empty(); features.set_onion_messages_optional(); let init_msg = msgs::Init { features, remote_network_address: None }; - nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone()).unwrap(); - nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone()).unwrap(); + nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone(), true).unwrap(); + nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone(), false).unwrap(); } nodes } diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 2c71e9c3..7814ccb6 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -417,7 +417,7 @@ impl OnionMessageHandler for OnionMe }; } - fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> { + fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init, _inbound: bool) -> Result<(), ()> { if init.features.supports_onion_messages() { let mut peers = self.pending_messages.lock().unwrap(); peers.insert(their_node_id.clone(), VecDeque::new()); diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 82d2cd4c..cf51b6ab 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -437,7 +437,7 @@ where U::Target: UtxoLookup, L::Target: Logger /// to request gossip messages for each channel. The sync is considered complete /// when the final reply_scids_end message is received, though we are not /// tracking this directly. - fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) -> Result<(), ()> { + fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> { // We will only perform a sync with peers that support gossip_queries. if !init_msg.features.supports_gossip_queries() { // Don't disconnect peers for not supporting gossip queries. 
We may wish to have @@ -2791,7 +2791,7 @@ pub(crate) mod tests { // It should ignore if gossip_queries feature is not enabled { let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None }; - gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap(); + gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); } @@ -2801,7 +2801,7 @@ pub(crate) mod tests { let mut features = InitFeatures::empty(); features.set_gossip_queries_optional(); let init_msg = Init { features, remote_network_address: None }; - gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap(); + gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap(); let events = gossip_sync.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 8dc98ee4..344f5b4c 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -427,7 +427,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler { fn peer_disconnected(&self, their_node_id: &PublicKey) { assert!(self.connected_peers.lock().unwrap().remove(their_node_id)); } - fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> { + fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init, _inbound: bool) -> Result<(), ()> { assert!(self.connected_peers.lock().unwrap().insert(their_node_id.clone())); // Don't bother with `received_msg` for Init as its auto-generated and we don't want to // bother re-generating the expected Init message in all tests. @@ -544,7 +544,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler { None } - fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> { + fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> { if !init_msg.features.supports_gossip_queries() { return Ok(()); } -- 2.30.2
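A note on how the new flag is meant to be used: each `peer_connected` implementation now learns whether the connection was accepted by us (inbound) or initiated by us (outbound), and `peer_handler.rs` records the same bit per peer in the new `inbound_connection` field. The handlers touched in this patch largely ignore the argument for now (it is bound as `_inbound`), but it enables policies that treat unsolicited peers more conservatively than peers we chose to dial. The sketch below is a minimal, self-contained illustration of one such policy; `PeerTracker`, `on_peer_connected`, and the inbound budget are hypothetical names for this example only and are not part of LDK's API.

use std::collections::HashMap;

/// Minimal per-peer state for the sketch; a real handler would keep more.
struct PeerInfo {
    inbound: bool,
}

/// Toy tracker showing the kind of policy the new `inbound` flag enables:
/// peers we dialed (outbound) are always kept, while unsolicited (inbound)
/// peers are subject to a budget.
struct PeerTracker {
    peers: HashMap<u64, PeerInfo>, // u64 stands in for the peer's node id
    max_inbound: usize,
}

impl PeerTracker {
    fn new(max_inbound: usize) -> Self {
        Self { peers: HashMap::new(), max_inbound }
    }

    /// Shaped like `peer_connected(&self, their_node_id, init, inbound)`:
    /// returning `Err(())` tells the caller to disconnect the peer.
    fn on_peer_connected(&mut self, node_id: u64, inbound: bool) -> Result<(), ()> {
        let inbound_count = self.peers.values().filter(|p| p.inbound).count();
        if inbound && inbound_count >= self.max_inbound {
            // Over the unsolicited-connection budget: refuse this inbound peer,
            // but never refuse a peer we deliberately connected out to.
            return Err(());
        }
        self.peers.insert(node_id, PeerInfo { inbound });
        Ok(())
    }

    fn on_peer_disconnected(&mut self, node_id: u64) {
        self.peers.remove(&node_id);
    }
}

fn main() {
    let mut tracker = PeerTracker::new(1);
    assert!(tracker.on_peer_connected(1, true).is_ok());  // first inbound peer fits the budget
    assert!(tracker.on_peer_connected(2, true).is_err()); // second inbound peer is refused
    assert!(tracker.on_peer_connected(3, false).is_ok()); // outbound peers are always accepted
    tracker.on_peer_disconnected(1);
    assert!(tracker.on_peer_connected(2, true).is_ok());  // budget freed up, inbound accepted again
    println!("tracked peers: {}", tracker.peers.len());
}

In the fuzzer and test changes above, the two ends of each simulated connection simply pass `true` and `false` respectively, so both the inbound and outbound paths are exercised.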