From: Matt Corallo
Date: Tue, 21 Feb 2023 19:10:43 +0000 (+0000)
Subject: Remove the `peer_disconnected` `no_connection_possible` flag
X-Git-Tag: v0.0.114-beta~11^2
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=refs%2Fheads%2F2023-02-fix-no-con-discon;p=rust-lightning

Remove the `peer_disconnected` `no_connection_possible` flag

Long ago, we used the `no_connection_possible` flag to signal that a
peer has some unknown feature set or that some other condition
prevents us from ever connecting to the given peer. In that case
we'd automatically force-close all channels with the given peer.
This was somewhat surprising to users, so we removed the automatic
force-close, leaving the flag serving no LDK-internal purpose.

Distilling the concept of "can we connect to this peer again in the
future" into a simple flag turns out to be rife with edge cases, so
users actually relying on the flag to force-close channels would
likely see surprising behavior. Thus, there's really not a lot of
reason to keep the flag, especially given it's untested and likely
broken in subtle ways anyway.
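With the flag gone, deciding that a peer is permanently unreachable
is entirely up to the user; nothing force-closes on your behalf. A
minimal sketch of doing it explicitly through the existing public
`ChannelManager` API (not part of this patch; `channel_manager` and
`peer_id` are hypothetical bindings, and the `ChannelManager` generic
parameters are elided):

    // Force-close every channel with a peer we believe we can never
    // connect to again, broadcasting the holder's latest commitment
    // transaction for each. Both calls below exist today on
    // ChannelManager; nothing here is added by this patch.
    for details in channel_manager.list_channels() {
        if details.counterparty.node_id == peer_id {
            let _ = channel_manager
                .force_close_broadcasting_latest_txn(&details.channel_id, &peer_id);
        }
    }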
---

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index a088f64f4..457e1e45b 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -978,16 +978,16 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 			0x0c => {
 				if !chan_a_disconnected {
-					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
-					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+					nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
+					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
 					drain_msg_events_on_disconnect!(0);
 				}
 			},
 			0x0d => {
 				if !chan_b_disconnected {
-					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
-					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
+					nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
 					chan_b_disconnected = true;
 					drain_msg_events_on_disconnect!(2);
 				}
 			},
@@ -1039,7 +1039,7 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 			0x2c => {
 				if !chan_a_disconnected {
-					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
 					drain_msg_events_on_disconnect!(0);
 				}
@@ -1053,14 +1053,14 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 			},
 			0x2d => {
 				if !chan_a_disconnected {
-					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
 					chan_a_disconnected = true;
 					nodes[0].get_and_clear_pending_msg_events();
 					ab_events.clear();
 					ba_events.clear();
 				}
 				if !chan_b_disconnected {
-					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
 					chan_b_disconnected = true;
 					nodes[2].get_and_clear_pending_msg_events();
 					bc_events.clear();
@@ -1072,7 +1072,7 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 			},
 			0x2e => {
 				if !chan_b_disconnected {
-					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
 					chan_b_disconnected = true;
 					drain_msg_events_on_disconnect!(2);
 				}
diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs
index f6cc87fa7..868f0b297 100644
--- a/lightning-invoice/src/utils.rs
+++ b/lightning-invoice/src/utils.rs
@@ -842,13 +842,13 @@ mod test {
 		// With only one sufficient-value peer connected we should only get its hint
 		scid_aliases.remove(&chan_b.0.short_channel_id_alias.unwrap());
-		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
 		match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases.clone());
 
 		// If we don't have any sufficient-value peers connected we should get all hints with
 		// sufficient value, even though there is a connected insufficient-value peer.
 		scid_aliases.insert(chan_b.0.short_channel_id_alias.unwrap());
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases);
 	}
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index b259f77ef..58d3d0a8a 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -643,7 +643,7 @@ mod tests {
 		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
 		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
 		fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
-		fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+		fn peer_disconnected(&self, their_node_id: &PublicKey) {
 			if *their_node_id == self.expected_pubkey {
 				self.disconnected_flag.store(true, Ordering::SeqCst);
 				self.pubkey_disconnected.clone().try_send(()).unwrap();
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index e1ea256a6..4385fb718 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -181,8 +181,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
 	if disconnect {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
@@ -234,8 +234,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
 	if disconnect {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
@@ -337,8 +337,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	};
 
 	if disconnect_count & !disconnect_flags > 0 {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	}
 
 	// Now fix monitor updating...
@@ -348,8 +348,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	check_added_monitors!(nodes[0], 0);
 
 	macro_rules! disconnect_reconnect_peers { () => { {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
@@ -1110,8 +1110,8 @@ fn test_monitor_update_fail_reestablish() {
 
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors!(nodes[2], 1);
@@ -1146,8 +1146,8 @@ fn test_monitor_update_fail_reestablish() {
 	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
 	check_added_monitors!(nodes[1], 1);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
@@ -1315,8 +1315,8 @@ fn claim_while_disconnected_monitor_update_fail() {
 	// Forward a payment for B to claim
 	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[1].node.claim_funds(payment_preimage_1);
 	check_added_monitors!(nodes[1], 1);
@@ -1451,8 +1451,8 @@ fn monitor_failed_no_reestablish_response() {
 	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
 	// is still failing to update monitors.
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
@@ -1873,8 +1873,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	}
 
 	// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2041,8 +2041,8 @@ fn test_pending_update_fee_ack_on_reconnect() {
 	let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 	// bs_first_raa is not delivered until it is re-generated after reconnect
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -2169,8 +2169,8 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -2303,9 +2303,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 		reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 	} else {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	}
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Now reconnect the two
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
@@ -2493,8 +2493,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	if second_fails {
 		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 1831cf057..097312dd1 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6270,13 +6270,13 @@ where
 		let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
 	}
 
-	fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
+	fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
 		let mut per_peer_state = self.per_peer_state.write().unwrap();
 		let remove_peer = {
-			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
-				log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
+				log_pubkey!(counterparty_node_id));
 			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
@@ -6332,7 +6332,7 @@ where
 
 	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
 		if !init_msg.features.supports_static_remote_key() {
-			log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+			log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
 			return Err(());
 		}
@@ -8210,8 +8210,8 @@ mod tests {
 
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
 		check_closed_broadcast!(nodes[0], true);
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 9055d4e76..7e2532f1f 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -3509,8 +3509,8 @@ fn test_dup_events_on_peer_disconnect() {
 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
 	expect_payment_sent_without_paths!(nodes[0], payment_preimage);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	expect_payment_path_successful!(nodes[0]);
@@ -3550,8 +3550,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
 	// Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
 	// disconnected before the funding transaction was broadcasted.
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
 	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
@@ -3567,8 +3567,8 @@ fn test_simple_peer_disconnect() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	create_announced_chan_between_nodes(&nodes, 1, 2);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3576,8 +3576,8 @@ fn test_simple_peer_disconnect() {
 	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
 	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
@@ -3585,8 +3585,8 @@ fn test_simple_peer_disconnect() {
 	let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 	let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
 	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
@@ -3680,8 +3680,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	if messages_delivered < 3 {
 		if simulate_broken_lnd {
 			// lnd has a long-standing bug where they send a channel_ready prior to a
@@ -3730,8 +3730,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		};
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[1].node.process_pending_htlc_forwards();
@@ -3813,8 +3813,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	if messages_delivered < 2 {
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 		if messages_delivered < 1 {
@@ -3840,8 +3840,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		expect_payment_path_successful!(nodes[0]);
 	}
 	if messages_delivered <= 5 {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	}
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -3955,8 +3955,8 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
 		_ => panic!("Unexpected event"),
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
@@ -6257,8 +6257,8 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
 	//Disconnect and Reconnect
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
@@ -6990,8 +6990,8 @@ fn test_announce_disable_channels() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	// Disconnect peers
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
 	nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
@@ -8790,13 +8790,11 @@ fn test_error_chans_closed() {
 		_ => panic!("Unexpected event"),
 	}
 	// Note that at this point users of a standard PeerHandler will end up calling
-	// peer_disconnected with no_connection_possible set to false, duplicating the
-	// close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
-	// users with their own peer handling logic. We duplicate the call here, however.
+	// peer_disconnected.
 	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
 }
@@ -8912,8 +8910,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 	let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index e8b40c71d..d43dff6e1 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -993,14 +993,8 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
 	fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures);
 
 	// Connection loss/reestablish:
-	/// Indicates a connection to the peer failed/an existing connection was lost. If no connection
-	/// is believed to be possible in the future (eg they're sending us messages we don't
-	/// understand or indicate they require unknown feature bits), `no_connection_possible` is set
-	/// and any outstanding channels should be failed.
-	///
-	/// Note that in some rare cases this may be called without a corresponding
-	/// [`Self::peer_connected`].
-	fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+	/// Indicates a connection to the peer failed/an existing connection was lost.
+	fn peer_disconnected(&self, their_node_id: &PublicKey);
 
 	/// Handle a peer reconnecting, possibly generating `channel_reestablish` message(s).
 	///
@@ -1115,10 +1109,7 @@ pub trait OnionMessageHandler : OnionMessageProvider {
 	fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
 
 	/// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
 	/// drop and refuse to forward onion messages to this peer.
-	///
-	/// Note that in some rare cases this may be called without a corresponding
-	/// [`Self::peer_connected`].
-	fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+	fn peer_disconnected(&self, their_node_id: &PublicKey);
 
 	// Handler information:
 	/// Gets the node feature flags which this handler itself supports. All available handlers are
diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs
index fbc8e1c9e..8f266bc17 100644
--- a/lightning/src/ln/onion_route_tests.rs
+++ b/lightning/src/ln/onion_route_tests.rs
@@ -579,8 +579,8 @@ fn test_onion_failure() {
 	let short_channel_id = channels[1].0.contents.short_channel_id;
 	run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {
 		// disconnect event to the channel between nodes[1] ~ nodes[2]
-		nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-		nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+		nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	}, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
 	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 4fce8198d..4f51d2f81 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -350,8 +350,8 @@ fn no_pending_leak_on_initial_send_failure() {
 
 	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
 		true, APIError::ChannelUnavailable { ref err },
@@ -401,8 +401,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment
 	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
 	// which would prevent retry.
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
@@ -431,7 +431,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	assert_eq!(as_broadcasted_txn.len(), 1);
 	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -587,7 +587,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
 	// force-close the channel.
@@ -679,7 +679,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -699,7 +699,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// Ensure that after reload we cannot retry the payment.
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -741,8 +741,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
@@ -893,7 +893,7 @@ fn test_fulfill_restart_failure() {
 	// Now reload nodes[1]...
 	reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index be778708c..995901a3b 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -89,7 +89,7 @@ impl OnionMessageProvider for IgnoringMessageHandler {
 impl OnionMessageHandler for IgnoringMessageHandler {
 	fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
 	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
-	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+	fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
 	fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
 	fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
 		InitFeatures::empty()
@@ -223,7 +223,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	}
 	// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
 	fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
-	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+	fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
 	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
 	fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
 	fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
@@ -315,16 +315,7 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
 /// descriptor.
 #[derive(Clone)]
-pub struct PeerHandleError {
-	/// Used to indicate that we probably can't make any future connections to this peer (e.g.
-	/// because we required features that our peer was missing, or vice versa).
-	///
-	/// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
-	/// any channels with this peer or check for new versions of LDK.
-	///
-	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-	pub no_connection_possible: bool,
-}
+pub struct PeerHandleError { }
 impl fmt::Debug for PeerHandleError {
 	fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
 		formatter.write_str("Peer Sent Invalid Data")
 	}
 }
@@ -1009,7 +1000,7 @@ impl {
 		let mut peer = peer_mutex.lock().unwrap();
@@ -1042,7 +1033,7 @@ impl
 			Ok(res) => Ok(res),
 			Err(e) => {
 				log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
-				self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
+				self.disconnect_event_internal(peer_descriptor);
 				Err(e)
 			}
 		}
@@ -1075,7 +1066,7 @@ impl {
 		let mut read_pos = 0;
@@ -1089,7 +1080,7 @@ impl {
 						//TODO: Try to push msg
 						log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
-						return Err(PeerHandleError{ no_connection_possible: false });
+						return Err(PeerHandleError { });
 					},
 					msgs::ErrorAction::IgnoreAndLog(level) => {
 						log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
@@ -1142,7 +1133,7 @@ impl {
 						log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
 						peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
-						return Err(PeerHandleError{ no_connection_possible: false })
+						return Err(PeerHandleError { })
 					},
 					hash_map::Entry::Vacant(entry) => {
 						log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1199,7 +1190,7 @@ impl
 						if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
 						peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
 						if msg_len < 2 { // Need at least the message type tag
-							return Err(PeerHandleError{ no_connection_possible: false });
+							return Err(PeerHandleError { });
 						}
 						peer.pending_read_is_header = false;
 					} else {
@@ -1242,19 +1233,19 @@ impl {
 							log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
 							self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
-							return Err(PeerHandleError { no_connection_possible: false });
+							return Err(PeerHandleError { });
 						}
-						(msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+						(msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
 						(msgs::DecodeError::InvalidValue, _) => {
 							log_debug!(self.logger, "Got an invalid value while deserializing message");
-							return Err(PeerHandleError { no_connection_possible: false });
+							return Err(PeerHandleError { });
 						}
 						(msgs::DecodeError::ShortRead, _) => {
 							log_debug!(self.logger, "Deserialization failed due to shortness of message");
-							return Err(PeerHandleError { no_connection_possible: false });
+							return Err(PeerHandleError { });
 						}
-						(msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
-						(msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
+						(msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
+						(msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
 					}
 				}
 			};
@@ -1306,10 +1297,10 @@ impl {
@@ -1518,8 +1509,7 @@ impl {
 					log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
-					// Fail the channel if message is an even, unknown type as per BOLT #1.
-					return Err(PeerHandleError{ no_connection_possible: true }.into());
+					return Err(PeerHandleError { }.into());
 				},
 				wire::Message::Unknown(type_id) => {
 					log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
@@ -1920,7 +1910,7 @@ impl OnionMessageHandler for OnionMe
 		Ok(())
 	}
-	fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+	fn peer_disconnected(&self, their_node_id: &PublicKey) {
 		let mut pending_msgs = self.pending_messages.lock().unwrap();
 		pending_msgs.remove(their_node_id);
 	}
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index e50576e1a..55abf6818 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -423,7 +423,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
 		self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
 	}
-	fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+	fn peer_disconnected(&self, their_node_id: &PublicKey) {
 		assert!(self.connected_peers.lock().unwrap().remove(their_node_id));
 	}
 	fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> {