}
fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
- let mut res = Vec::new();
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
// a scid or a scid alias, and the `id_to_peer` map shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but they are
// unlikely, as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
- res.reserve(self.short_to_chan_info.read().unwrap().len());
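+ // Allocate once up front with the estimate; pushes below should then
+ // rarely, if ever, reallocate.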
+ let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
let best_block_height = self.best_block.read().unwrap().height();
let per_peer_state = self.per_peer_state.read().unwrap();
true
}
- /// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
- /// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
- /// to that peer is later closed while still being disconnected (i.e. force closed), we
- /// therefore need to remove the peer from `peer_state` separately.
- /// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
- /// instead remove such peers awaiting removal through this function, which is called on a
- /// timer through `timer_tick_occurred`, passing the peers disconnected peers with no channels,
- /// to limit the negative effects on parallelism as much as possible.
- ///
- /// Must be called without the `per_peer_state` lock acquired.
- fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
- if pending_peers_awaiting_removal.len() > 0 {
- let mut per_peer_state = self.per_peer_state.write().unwrap();
- for counterparty_node_id in pending_peers_awaiting_removal {
- match per_peer_state.entry(counterparty_node_id) {
- hash_map::Entry::Occupied(entry) => {
- // Remove the entry if the peer is still disconnected and we still
- // have no channels to the peer.
- let remove_entry = {
- let peer_state = entry.get().lock().unwrap();
- !peer_state.is_connected && peer_state.channel_by_id.len() == 0
- };
- if remove_entry {
- entry.remove_entry();
- }
- },
- hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
- }
- }
- }
- }
-
#[cfg(any(test, feature = "_test_utils"))]
/// Process background events, for functional testing
pub fn test_process_background_events(&self) {
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
- let mut pending_peers_awaiting_removal = HashSet::new();
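+ // Iterating `per_peer_state` yields each peer at most once, so a `Vec`
+ // suffices here; no `HashSet` deduplication is needed.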
+ let mut pending_peers_awaiting_removal = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
});
let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
if peer_should_be_removed {
- pending_peers_awaiting_removal.insert(counterparty_node_id);
+ pending_peers_awaiting_removal.push(counterparty_node_id);
+ }
+ }
+ }
+
+ // When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+ // `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+ // to that peer are later closed while the peer is still disconnected (i.e. force
+ // closed), we therefore need to remove the peer from `peer_state` separately.
+ // To avoid having to take the `per_peer_state` `write` lock once the channels are
+ // closed, we instead remove such peers awaiting removal here on a timer, to limit the
+ // negative effects on parallelism as much as possible.
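+ // The `write` lock below is only taken when at least one candidate peer was
+ // found, so timer ticks with nothing to clean up stay contention-free.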
+ if !pending_peers_awaiting_removal.is_empty() {
+ let mut per_peer_state = self.per_peer_state.write().unwrap();
+ for counterparty_node_id in pending_peers_awaiting_removal {
+ match per_peer_state.entry(counterparty_node_id) {
+ hash_map::Entry::Occupied(entry) => {
+ // Remove the entry if the peer is still disconnected and we still
+ // have no channels to the peer.
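+ // The peer may have reconnected or regained a channel between dropping the
+ // `read` lock above and acquiring the `write` lock, so the conditions are
+ // re-checked here before removal.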
+ let remove_entry = {
+ let peer_state = entry.get().lock().unwrap();
+ !peer_state.is_connected && peer_state.channel_by_id.is_empty()
+ };
+ if remove_entry {
+ entry.remove_entry();
+ }
+ },
+ hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
}
}
}
- self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
if htlcs.is_empty() {