From: Viktor Tigerström <11711198+ViktorTigerstrom@users.noreply.github.com>
Date: Wed, 18 Jan 2023 01:30:36 +0000 (+0100)
Subject: Initialize `list_channels_with_filter` result vec with capacity
X-Git-Tag: v0.0.114-beta~21^2~1
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=9ae2810c3117dd7725121aeaa903abae1b55d681;p=rust-lightning

Initialize `list_channels_with_filter` result vec with capacity
---

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index aefe9cc3e..e3a67d75c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1616,14 +1616,13 @@ where
 	}
 
 	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
-		let mut res = Vec::new();
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
 		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
 		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
 		// unlikely as the `short_to_chan_info` map often contains 2 entries for
 		// the same channel.
-		res.reserve(self.short_to_chan_info.read().unwrap().len());
+		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
 		{
 			let best_block_height = self.best_block.read().unwrap().height();
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3401,38 +3400,6 @@ where
 		true
 	}
 
-	/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
-	/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
-	/// to that peer is later closed while still being disconnected (i.e. force closed), we
-	/// therefore need to remove the peer from `peer_state` separately.
-	/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
-	/// instead remove such peers awaiting removal through this function, which is called on a
-	/// timer through `timer_tick_occurred`, passing the peers disconnected peers with no channels,
-	/// to limit the negative effects on parallelism as much as possible.
-	///
-	/// Must be called without the `per_peer_state` lock acquired.
-	fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
-		if pending_peers_awaiting_removal.len() > 0 {
-			let mut per_peer_state = self.per_peer_state.write().unwrap();
-			for counterparty_node_id in pending_peers_awaiting_removal {
-				match per_peer_state.entry(counterparty_node_id) {
-					hash_map::Entry::Occupied(entry) => {
-						// Remove the entry if the peer is still disconnected and we still
-						// have no channels to the peer.
-						let remove_entry = {
-							let peer_state = entry.get().lock().unwrap();
-							!peer_state.is_connected && peer_state.channel_by_id.len() == 0
-						};
-						if remove_entry {
-							entry.remove_entry();
-						}
-					},
-					hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
-				}
-			}
-		}
-	}
-
 	#[cfg(any(test, feature = "_test_utils"))]
 	/// Process background events, for functional testing
 	pub fn test_process_background_events(&self) {
@@ -3506,7 +3473,7 @@ where
 
 		let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
 		let mut timed_out_mpp_htlcs = Vec::new();
-		let mut pending_peers_awaiting_removal = HashSet::new();
+		let mut pending_peers_awaiting_removal = Vec::new();
 		{
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -3556,11 +3523,37 @@ where
 				});
 				let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
 				if peer_should_be_removed {
-					pending_peers_awaiting_removal.insert(counterparty_node_id);
+					pending_peers_awaiting_removal.push(counterparty_node_id);
+				}
+			}
+		}
+
+		// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+		// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+		// of to that peer is later closed while still being disconnected (i.e. force closed),
+		// we therefore need to remove the peer from `peer_state` separately.
+		// To avoid having to take the `per_peer_state` `write` lock once the channels are
+		// closed, we instead remove such peers awaiting removal here on a timer, to limit the
+		// negative effects on parallelism as much as possible.
+		if pending_peers_awaiting_removal.len() > 0 {
+			let mut per_peer_state = self.per_peer_state.write().unwrap();
+			for counterparty_node_id in pending_peers_awaiting_removal {
+				match per_peer_state.entry(counterparty_node_id) {
+					hash_map::Entry::Occupied(entry) => {
+						// Remove the entry if the peer is still disconnected and we still
+						// have no channels to the peer.
+						let remove_entry = {
+							let peer_state = entry.get().lock().unwrap();
+							!peer_state.is_connected && peer_state.channel_by_id.len() == 0
+						};
+						if remove_entry {
+							entry.remove_entry();
+						}
+					},
+					hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
 				}
 			}
 		}
-		self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
 
 		self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
 			if htlcs.is_empty() {
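
For reference, not part of the patch above: a minimal standalone sketch of the two-phase pattern the last hunk inlines into `timer_tick_occurred` — collect candidate peers while holding only the `per_peer_state` read lock, then take the write lock once and re-check each candidate before removing it. The `PeerState` struct, `channel_count` field, `u64` keys (standing in for `PublicKey`), and `prune_disconnected_peers` name are simplified assumptions for illustration, not the ChannelManager's actual types. The patch's other change simply replaces `Vec::new()` plus `reserve(n)` with the single-allocation equivalent `Vec::with_capacity(n)`.

use std::collections::{hash_map, HashMap};
use std::sync::{Mutex, RwLock};

// Simplified stand-in for the per-peer state; only the fields the removal
// check consults are modeled here.
struct PeerState {
	is_connected: bool,
	channel_count: usize,
}

// Two-phase removal of disconnected, channel-less peers (keys are `u64`
// stand-ins for `PublicKey`).
fn prune_disconnected_peers(per_peer_state: &RwLock<HashMap<u64, Mutex<PeerState>>>) {
	// Phase 1: collect candidates under the read lock only, so other
	// readers are not blocked during the scan.
	let pending: Vec<u64> = per_peer_state.read().unwrap().iter()
		.filter(|(_, peer)| {
			let peer = peer.lock().unwrap();
			!peer.is_connected && peer.channel_count == 0
		})
		.map(|(id, _)| *id)
		.collect();
	if pending.is_empty() { return; }

	// Phase 2: take the write lock once and re-check each candidate, since
	// a peer may have reconnected or regained a channel in between.
	let mut per_peer_state = per_peer_state.write().unwrap();
	for id in pending {
		if let hash_map::Entry::Occupied(entry) = per_peer_state.entry(id) {
			let remove_entry = {
				let peer = entry.get().lock().unwrap();
				!peer.is_connected && peer.channel_count == 0
			};
			if remove_entry { entry.remove_entry(); }
		}
	}
}

fn main() {
	let peers = RwLock::new(HashMap::from([
		(1, Mutex::new(PeerState { is_connected: false, channel_count: 0 })),
		(2, Mutex::new(PeerState { is_connected: true, channel_count: 3 })),
	]));
	prune_disconnected_peers(&peers);
	assert_eq!(peers.read().unwrap().len(), 1); // only the active peer remains
}

Splitting the scan from the removal keeps the exclusive `write` lock out of the common path entirely when no peer needs pruning, which is the parallelism concern the added comment in the patch describes.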