Don't serialize nodes to which we have no channels
author Viktor Tigerström <11711198+ViktorTigerstrom@users.noreply.github.com>
Sat, 4 Feb 2023 18:14:42 +0000 (20:14 +0200)
committer Viktor Tigerström <11711198+ViktorTigerstrom@users.noreply.github.com>
Tue, 14 Feb 2023 14:04:30 +0000 (15:04 +0100)
lightning/src/ln/channelmanager.rs
lightning/src/ln/reorg_tests.rs

index e3a67d75c9085fefd8e42a17160bb3e128ed298e..e2386a9474fa4a12a11cff98de3166b987371c71 100644 (file)
@@ -493,6 +493,18 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
        is_connected: bool,
 }
 
+impl <Signer: ChannelSigner> PeerState<Signer> {
+       /// Indicates whether a peer meets the criteria for removal from our storage.
+       /// If true is passed for `require_disconnected`, the function will return false if we haven't
+       /// disconnected from the node already, i.e. if `PeerState::is_connected` is set to `true`.
+       fn ok_to_remove(&self, require_disconnected: bool) -> bool {
+               if require_disconnected && self.is_connected {
+                       return false
+               }
+               self.channel_by_id.len() == 0
+       }
+}
+
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
 /// actually ours and not some duplicate HTLC sent to us by a node along the route.
 ///
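As a reference for the call sites below, the helper's two modes can be illustrated with a minimal standalone sketch (a simplified `PeerState` stand-in, not LDK's generic struct):

use std::collections::HashMap;

// Simplified stand-in for the real PeerState, which is generic over a ChannelSigner.
struct PeerState {
	channel_by_id: HashMap<u64, ()>,
	is_connected: bool,
}

impl PeerState {
	fn ok_to_remove(&self, require_disconnected: bool) -> bool {
		if require_disconnected && self.is_connected {
			return false
		}
		self.channel_by_id.len() == 0
	}
}

fn main() {
	let peer = PeerState { channel_by_id: HashMap::new(), is_connected: true };
	// Runtime cleanup (timer ticks, disconnects) passes `true`: a still-connected
	// peer is never pruned, even with no channels.
	assert!(!peer.ok_to_remove(true));
	// Serialization passes `false`: at shutdown every peer is treated as
	// disconnected, so a channel-less peer is skipped either way.
	assert!(peer.ok_to_remove(false));
}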
@@ -3521,8 +3533,7 @@ where
 
                                                true
                                        });
-                                       let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
-                                       if peer_should_be_removed {
+                                       if peer_state.ok_to_remove(true) {
                                                pending_peers_awaiting_removal.push(counterparty_node_id);
                                        }
                                }
@@ -3544,7 +3555,7 @@ where
                                                        // have no channels to the peer.
                                                        let remove_entry = {
                                                                let peer_state = entry.get().lock().unwrap();
-                                                               !peer_state.is_connected && peer_state.channel_by_id.len() == 0
+                                                               peer_state.ok_to_remove(true)
                                                        };
                                                        if remove_entry {
                                                                entry.remove_entry();
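The candidate list above is collected under the outer read lock, so a peer can regain a channel before the write lock is taken; `ok_to_remove(true)` is therefore re-evaluated under the write lock before the entry is dropped. The same double-check shape, reduced to std types (hypothetical names; `Vec<u32>` stands in for `channel_by_id`):

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Hypothetical registry mirroring per_peer_state's RwLock<HashMap<_, Mutex<_>>> layout.
fn prune_peers(per_peer: &RwLock<HashMap<u64, Mutex<Vec<u32>>>>, candidates: Vec<u64>) {
	let mut peers = per_peer.write().unwrap();
	for key in candidates {
		if let Entry::Occupied(entry) = peers.entry(key) {
			// Re-check under the write lock: a channel may have appeared between
			// the read-locked scan that nominated this peer and now.
			let remove_entry = entry.get().lock().unwrap().is_empty();
			if remove_entry {
				entry.remove_entry();
			}
		}
	}
}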
@@ -6254,9 +6265,8 @@ where
        fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let mut failed_channels = Vec::new();
-               let mut no_channels_remain = true;
                let mut per_peer_state = self.per_peer_state.write().unwrap();
-               {
+               let remove_peer = {
                        log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
@@ -6269,8 +6279,6 @@ where
                                                update_maps_on_chan_removal!(self, chan);
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                                                return false;
-                                       } else {
-                                               no_channels_remain = false;
                                        }
                                        true
                                });
@@ -6300,9 +6308,10 @@ where
                                });
                                debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
                                peer_state.is_connected = false;
-                       }
-               }
-               if no_channels_remain {
+                               peer_state.ok_to_remove(true)
+                       } else { true }
+               };
+               if remove_peer {
                        per_peer_state.remove(counterparty_node_id);
                }
                mem::drop(per_peer_state);
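The refactor above replaces the `no_channels_remain` flag, previously mutated from inside the `retain` closure, with a block expression whose value is the removal decision. A reduced, std-only sketch of the new shape (hypothetical names; `Vec<u32>` stands in for `channel_by_id`):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

fn on_peer_disconnected(per_peer: &RwLock<HashMap<u64, Mutex<Vec<u32>>>>, peer: u64) {
	let mut per_peer = per_peer.write().unwrap();
	// The whole block evaluates to the removal decision, rather than threading a
	// mutable flag through the retain closure.
	let remove_peer = if let Some(state_mutex) = per_peer.get(&peer) {
		let mut channels = state_mutex.lock().unwrap();
		// Drop channels that cannot survive a disconnect, as the real closure
		// does when it force-closes and returns false.
		channels.retain(|chan| *chan != 0);
		// Stands in for peer_state.ok_to_remove(true) once is_connected is false.
		channels.is_empty()
	} else { true };
	if remove_peer {
		per_peer.remove(&peer);
	}
}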
@@ -6896,6 +6905,7 @@ where
                        best_block.block_hash().write(writer)?;
                }
 
+               let mut serializable_peer_count: u64 = 0;
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut unfunded_channels = 0;
@@ -6903,6 +6913,9 @@ where
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               if !peer_state.ok_to_remove(false) {
+                                       serializable_peer_count += 1;
+                               }
                                number_of_channels += peer_state.channel_by_id.len();
                                for (_, channel) in peer_state.channel_by_id.iter() {
                                        if !channel.is_funding_initiated() {
@@ -6953,11 +6966,18 @@ where
                        htlc_purposes.push(purpose);
                }
 
-               (per_peer_state.len() as u64).write(writer)?;
+               (serializable_peer_count).write(writer)?;
                for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
-                       peer_pubkey.write(writer)?;
-                       let peer_state = peer_state_mutex.lock().unwrap();
-                       peer_state.latest_features.write(writer)?;
+                       let peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &*peer_state_lock;
+                       // Peers to which we have no channels should be dropped once disconnected. As we
+                       // disconnect all peers when shutting down and serializing the ChannelManager, we
+                       // consider all peers as disconnected here. There's therefore no need to write peers
+                       // with no channels.
+                       if !peer_state.ok_to_remove(false) {
+                               peer_pubkey.write(writer)?;
+                               peer_state.latest_features.write(writer)?;
+                       }
                }
 
                let events = self.pending_events.lock().unwrap();
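Because the length prefix precedes the entries, the `ok_to_remove(false)` filter must be applied twice with the same predicate: once to compute `serializable_peer_count` and again while writing the entries, or a reader would hit a count/entry mismatch. A sketch of that shape with plain std::io types (hypothetical structs, not LDK's `Writeable` machinery):

use std::collections::HashMap;
use std::io::{self, Write};

// Hypothetical peer record; `channels` stands in for channel_by_id.len().
struct Peer { channels: usize, features: u8 }

impl Peer {
	// Mirrors ok_to_remove(false): at shutdown every peer counts as disconnected,
	// so only the channel check decides serializability.
	fn is_serializable(&self) -> bool { self.channels != 0 }
}

fn write_peers(peers: &HashMap<[u8; 33], Peer>, w: &mut impl Write) -> io::Result<()> {
	// Pass 1: the count must match exactly what the second pass emits.
	let count = peers.values().filter(|p| p.is_serializable()).count() as u64;
	w.write_all(&count.to_be_bytes())?;
	// Pass 2: same predicate, so channel-less peers are silently dropped on restart.
	for (pubkey, peer) in peers.iter() {
		if peer.is_serializable() {
			w.write_all(pubkey)?;
			w.write_all(&[peer.features])?;
		}
	}
	Ok(())
}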
index bc1b996f50474ebe2fd0b4bc1c29bb07bc46e2bc..ce1f3b64750bafbd7343ff66f47e8b16677d30a4 100644 (file)
@@ -13,7 +13,7 @@ use crate::chain::channelmonitor::ANTI_REORG_DELAY;
 use crate::chain::transaction::OutPoint;
 use crate::chain::Confirm;
 use crate::ln::channelmanager::ChannelManager;
-use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::msgs::{ChannelMessageHandler, Init};
 use crate::util::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use crate::util::test_utils;
 use crate::util::ser::Writeable;
@@ -374,6 +374,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
        // Now check that we can create a new channel
+       if reload_node && nodes[0].node.per_peer_state.read().unwrap().len() == 0 {
+               // If we dropped the channel before reloading the node, nodes[1] was also dropped from
+               // nodes[0]'s storage, and is hence not reconnected on startup. We therefore need to
+               // reconnect to the node before attempting to create a new channel.
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       }
        create_announced_chan_between_nodes(&nodes, 0, 1);
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
 }