From: Matt Corallo
Date: Mon, 1 Jan 2024 21:53:36 +0000 (+0000)
Subject: Use utility methods to construct `HashMap`s and `HashSet`s
X-Git-Tag: v0.0.123-beta~80^2~4
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=3e0d55bd2b96ea10ecc744d4cbd9bbfc1ad4ef50;p=rust-lightning

Use utility methods to construct `HashMap`s and `HashSet`s

In the next commit we'll bump the `hashbrown` version, which no longer
randomizes its hasher by default. Thus, we'll need to call a different
constructor in no-std builds than in std builds. Here we do a quick
prefactor to use wrappers for the constructors, rather than calling the
tables directly, to make the version-bump changeset smaller.
---
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index b71f10f58..4adafd2db 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -420,7 +420,7 @@ where C::Target: chain::Filter, /// transactions relevant to the watched channels. pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P) -> Self { Self { - monitors: RwLock::new(HashMap::new()), + monitors: RwLock::new(new_hash_map()), sync_persistence_id: AtomicCounter::new(), chain_source, broadcaster, diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index fb5c18eae..a9250c0a7 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1235,7 +1235,7 @@ impl ChannelMonitor { channel_parameters.clone(), initial_holder_commitment_tx, secp_ctx ); - let mut outputs_to_watch = HashMap::new(); + let mut outputs_to_watch = new_hash_map(); outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]); Self::from_impl(ChannelMonitorImpl { @@ -1262,17 +1262,17 @@ impl ChannelMonitor { on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay, commitment_secrets: CounterpartyCommitmentSecrets::new(), - counterparty_claimable_outpoints: HashMap::new(), - counterparty_commitment_txn_on_chain: HashMap::new(), - counterparty_hash_commitment_number: HashMap::new(), - counterparty_fulfilled_htlcs: HashMap::new(), + counterparty_claimable_outpoints: new_hash_map(), + counterparty_commitment_txn_on_chain: new_hash_map(), + counterparty_hash_commitment_number: new_hash_map(), + counterparty_fulfilled_htlcs: new_hash_map(), prev_holder_signed_commitment_tx: None, current_holder_commitment_tx: holder_commitment_tx, current_counterparty_commitment_number: 1 << 48, current_holder_commitment_number, - payment_preimages: HashMap::new(), + payment_preimages: new_hash_map(), pending_monitor_events: Vec::new(), pending_events: Vec::new(), is_processing_pending_events: false, @@ -2174,7 +2174,7 @@ impl ChannelMonitor { /// HTLCs which were resolved on-chain (i.e. where the final HTLC resolution was done by an /// event from this `ChannelMonitor`). pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap)> { - let mut res = HashMap::new(); + let mut res = new_hash_map(); // Just examine the available counterparty commitment transactions. See docs on // `fail_unbroadcast_htlcs`, below, for justification. let us = self.inner.lock().unwrap(); @@ -2226,7 +2226,7 @@ impl ChannelMonitor { return self.get_all_current_outbound_htlcs(); } - let mut res = HashMap::new(); + let mut res = new_hash_map(); macro_rules! 
walk_htlcs { ($holder_commitment: expr, $htlc_iter: expr) => { for (htlc, source) in $htlc_iter { @@ -3940,7 +3940,7 @@ impl ChannelMonitorImpl { /// Filters a block's `txdata` for transactions spending watched outputs or for any child /// transactions thereof. fn filter_block<'a>(&self, txdata: &TransactionData<'a>) -> Vec<&'a Transaction> { - let mut matched_txn = HashSet::new(); + let mut matched_txn = new_hash_set(); txdata.iter().filter(|&&(_, tx)| { let mut matches = self.spends_watched_output(tx); for input in tx.input.iter() { @@ -4455,7 +4455,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?; - let mut counterparty_claimable_outpoints = HashMap::with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64)); + let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64)); for _ in 0..counterparty_claimable_outpoints_len { let txid: Txid = Readable::read(reader)?; let htlcs_count: u64 = Readable::read(reader)?; @@ -4469,7 +4469,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?; - let mut counterparty_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32)); + let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32)); for _ in 0..counterparty_commitment_txn_on_chain_len { let txid: Txid = Readable::read(reader)?; let commitment_number = ::read(reader)?.0; @@ -4479,7 +4479,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?; - let mut counterparty_hash_commitment_number = HashMap::with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32)); + let mut counterparty_hash_commitment_number = hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32)); for _ in 0..counterparty_hash_commitment_number_len { let payment_hash: PaymentHash = Readable::read(reader)?; let commitment_number = ::read(reader)?.0; @@ -4502,7 +4502,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let current_holder_commitment_number = ::read(reader)?.0; let payment_preimages_len: u64 = Readable::read(reader)?; - let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32)); + let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32)); for _ in 0..payment_preimages_len { let preimage: PaymentPreimage = Readable::read(reader)?; let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); @@ -4542,7 +4542,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let outputs_to_watch_len: u64 = Readable::read(reader)?; - let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::() + mem::size_of::>()))); + let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::() + mem::size_of::>()))); for _ in 
0..outputs_to_watch_len { let txid = Readable::read(reader)?; let outputs_len: u64 = Readable::read(reader)?; @@ -4584,7 +4584,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut counterparty_node_id = None; let mut confirmed_commitment_tx_counterparty_output = None; let mut spendable_txids_confirmed = Some(Vec::new()); - let mut counterparty_fulfilled_htlcs = Some(HashMap::new()); + let mut counterparty_fulfilled_htlcs = Some(new_hash_map()); let mut initial_counterparty_commitment_info = None; let mut channel_id = None; read_tlv_fields!(reader, { diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 108ff0093..5fe4bd2c1 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -374,13 +374,13 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP signer.provide_channel_parameters(&channel_parameters); let pending_claim_requests_len: u64 = Readable::read(reader)?; - let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128)); + let mut pending_claim_requests = hash_map_with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128)); for _ in 0..pending_claim_requests_len { pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?); } let claimable_outpoints_len: u64 = Readable::read(reader)?; - let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128)); + let mut claimable_outpoints = hash_map_with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128)); for _ in 0..claimable_outpoints_len { let outpoint = Readable::read(reader)?; let ancestor_claim_txid = Readable::read(reader)?; @@ -445,8 +445,8 @@ impl OnchainTxHandler prev_holder_commitment: None, signer, channel_transaction_parameters: channel_parameters, - pending_claim_requests: HashMap::new(), - claimable_outpoints: HashMap::new(), + pending_claim_requests: new_hash_map(), + claimable_outpoints: new_hash_map(), locktimed_packages: BTreeMap::new(), onchain_events_awaiting_threshold_conf: Vec::new(), pending_claim_events: Vec::new(), @@ -834,7 +834,7 @@ impl OnchainTxHandler F::Target: FeeEstimator, { log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height); - let mut bump_candidates = HashMap::new(); + let mut bump_candidates = new_hash_map(); for tx in txn_matched { // Scan all input to verify is one of the outpoint spent is of interest for us let mut claimed_outputs_material = Vec::new(); @@ -1020,7 +1020,7 @@ impl OnchainTxHandler where B::Target: BroadcasterInterface, F::Target: FeeEstimator, { - let mut bump_candidates = HashMap::new(); + let mut bump_candidates = new_hash_map(); let onchain_events_awaiting_threshold_conf = self.onchain_events_awaiting_threshold_conf.drain(..).collect::>(); for entry in onchain_events_awaiting_threshold_conf { diff --git a/lightning/src/events/bump_transaction.rs b/lightning/src/events/bump_transaction.rs index 1b019ba34..0676fef14 100644 --- a/lightning/src/events/bump_transaction.rs +++ b/lightning/src/events/bump_transaction.rs @@ -391,7 +391,7 @@ where /// Returns a new instance backed by the given [`WalletSource`] that serves as an implementation /// of [`CoinSelectionSource`]. 
pub fn new(source: W, logger: L) -> Self { - Self { source, logger, locked_utxos: Mutex::new(HashMap::new()) } + Self { source, logger, locked_utxos: Mutex::new(new_hash_map()) } } /// Performs coin selection on the set of UTXOs obtained from diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs index 160f63241..33f596fd8 100644 --- a/lightning/src/lib.rs +++ b/lightning/src/lib.rs @@ -169,10 +169,40 @@ mod prelude { extern crate hashbrown; pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box}; + + #[cfg(not(feature = "hashbrown"))] + mod std_hashtables { + pub(crate) use std::collections::{HashMap, HashSet, hash_map}; + + pub(crate) type OccupiedHashMapEntry<'a, K, V> = + std::collections::hash_map::OccupiedEntry<'a, K, V>; + pub(crate) type VacantHashMapEntry<'a, K, V> = + std::collections::hash_map::VacantEntry<'a, K, V>; + } #[cfg(not(feature = "hashbrown"))] - pub use std::collections::{HashMap, HashSet, hash_map}; + pub(crate) use std_hashtables::*; + + #[cfg(feature = "hashbrown")] + mod hashbrown_tables { + pub(crate) use hashbrown::{HashMap, HashSet, hash_map}; + + pub(crate) type OccupiedHashMapEntry<'a, K, V> = + hashbrown::hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>; + pub(crate) type VacantHashMapEntry<'a, K, V> = + hashbrown::hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>; + } #[cfg(feature = "hashbrown")] - pub use self::hashbrown::{HashMap, HashSet, hash_map}; + pub(crate) use hashbrown_tables::*; + + pub(crate) fn new_hash_map() -> HashMap { HashMap::new() } + pub(crate) fn hash_map_with_capacity(cap: usize) -> HashMap { + HashMap::with_capacity(cap) + } + + pub(crate) fn new_hash_set() -> HashSet { HashSet::new() } + pub(crate) fn hash_set_with_capacity(cap: usize) -> HashSet { + HashSet::with_capacity(cap) + } pub use alloc::borrow::ToOwned; pub use alloc::string::ToString; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e56fa9789..d2034ebc8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6431,7 +6431,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { channel_ready_event_emitted: false, #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), + historical_inbound_htlc_fulfills: new_hash_set(), channel_type, channel_keys_id, @@ -7232,7 +7232,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { channel_ready_event_emitted: false, #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), + historical_inbound_htlc_fulfills: new_hash_set(), channel_type, channel_keys_id, @@ -8048,7 +8048,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let channel_update_status = Readable::read(reader)?; #[cfg(any(test, fuzzing))] - let mut historical_inbound_htlc_fulfills = HashSet::new(); + let mut historical_inbound_htlc_fulfills = new_hash_set(); #[cfg(any(test, fuzzing))] { let htlc_fulfills_len: u64 = Readable::read(reader)?; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 976a7b567..a9ae6ee3c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2432,14 +2432,14 @@ where best_block: RwLock::new(params.best_block), - outbound_scid_aliases: Mutex::new(HashSet::new()), - pending_inbound_payments: Mutex::new(HashMap::new()), + outbound_scid_aliases: Mutex::new(new_hash_set()), + pending_inbound_payments: Mutex::new(new_hash_map()), pending_outbound_payments: OutboundPayments::new(), - 
forward_htlcs: Mutex::new(HashMap::new()), - claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }), - pending_intercepted_htlcs: Mutex::new(HashMap::new()), - outpoint_to_peer: Mutex::new(HashMap::new()), - short_to_chan_info: FairRwLock::new(HashMap::new()), + forward_htlcs: Mutex::new(new_hash_map()), + claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), + outpoint_to_peer: Mutex::new(new_hash_map()), + short_to_chan_info: FairRwLock::new(new_hash_map()), our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(), secp_ctx, @@ -2451,7 +2451,7 @@ where highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize), - per_peer_state: FairRwLock::new(HashMap::new()), + per_peer_state: FairRwLock::new(new_hash_map()), pending_events: Mutex::new(VecDeque::new()), pending_events_processor: AtomicBool::new(false), @@ -3685,7 +3685,7 @@ where ProbeSendFailure::RouteNotFound })?; - let mut used_liquidity_map = HashMap::with_capacity(first_hops.len()); + let mut used_liquidity_map = hash_map_with_capacity(first_hops.len()); let mut res = Vec::new(); @@ -4242,7 +4242,7 @@ where let mut failed_forwards = Vec::new(); let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { - let mut forward_htlcs = HashMap::new(); + let mut forward_htlcs = new_hash_map(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); for (short_chan_id, mut pending_forwards) in forward_htlcs { @@ -9015,8 +9015,8 @@ where return NotifyOption::SkipPersistNoEvents; } e.insert(Mutex::new(PeerState { - channel_by_id: HashMap::new(), - inbound_channel_request_by_id: HashMap::new(), + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -10067,7 +10067,7 @@ where } // Encode without retry info for 0.0.101 compatibility. 
- let mut pending_outbound_payments_no_retry: HashMap> = HashMap::new(); + let mut pending_outbound_payments_no_retry: HashMap> = new_hash_map(); for (id, outbound) in pending_outbound_payments.iter() { match outbound { PendingOutboundPayment::Legacy { session_privs } | @@ -10095,7 +10095,7 @@ where for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() { if !updates.is_empty() { - if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); } + if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); } in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates); } } @@ -10331,13 +10331,13 @@ where let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; - let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); - let mut funded_peer_channels: HashMap>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); - let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); - let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); + let mut funded_peer_channels: HashMap>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize); + let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) @@ -10423,7 +10423,7 @@ where by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); }, hash_map::Entry::Vacant(entry) => { - let mut by_id_map = HashMap::new(); + let mut by_id_map = new_hash_map(); by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); entry.insert(by_id_map); } @@ -10470,7 +10470,7 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; @@ -10496,7 +10496,7 @@ where let peer_state_from_chans = |channel_by_id| { PeerState { channel_by_id, - inbound_channel_request_by_id: HashMap::new(), + inbound_channel_request_by_id: new_hash_map(), latest_features: InitFeatures::empty(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -10507,10 +10507,10 @@ where }; let peer_count: u64 = Readable::read(reader)?; - let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); + let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, 
MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); for _ in 0..peer_count { let peer_pubkey = Readable::read(reader)?; - let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()); + let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map()); let mut peer_state = peer_state_from_chans(peer_chans); peer_state.latest_features = Readable::read(reader)?; per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); @@ -10544,7 +10544,7 @@ where let highest_seen_timestamp: u32 = Readable::read(reader)?; let pending_inbound_payment_count: u64 = Readable::read(reader)?; - let mut pending_inbound_payments: HashMap = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32))); + let mut pending_inbound_payments: HashMap = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32))); for _ in 0..pending_inbound_payment_count { if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() { return Err(DecodeError::InvalidValue); @@ -10553,7 +10553,7 @@ where let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; let mut pending_outbound_payments_compat: HashMap = - HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; let payment = PendingOutboundPayment::Legacy { @@ -10567,13 +10567,13 @@ where // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. let mut pending_outbound_payments_no_retry: Option>> = None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = Some(HashMap::new()); + let mut pending_intercepted_htlcs: Option> = Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; let mut claimable_htlc_onion_fields = None; - let mut pending_claiming_payments = Some(HashMap::new()); + let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; let mut in_flight_monitor_updates: Option>> = None; @@ -10610,7 +10610,7 @@ where if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { pending_outbound_payments = Some(pending_outbound_payments_compat); } else if pending_outbound_payments.is_none() { - let mut outbounds = HashMap::new(); + let mut outbounds = new_hash_map(); for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); } @@ -10719,7 +10719,7 @@ where // still open, we need to replay any monitor updates that are for closed channels, // creating the neccessary peer_state entries as we go. 
let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| { - Mutex::new(peer_state_from_chans(HashMap::new())) + Mutex::new(peer_state_from_chans(new_hash_map())) }); let mut peer_state = peer_state_mutex.lock().unwrap(); handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, @@ -10905,7 +10905,7 @@ where let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); - let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len()); + let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); if let Some(purposes) = claimable_htlc_purposes { if purposes.len() != claimable_htlcs_list.len() { return Err(DecodeError::InvalidValue); @@ -10978,7 +10978,7 @@ where } } - let mut outbound_scid_aliases = HashSet::new(); + let mut outbound_scid_aliases = new_hash_set(); for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12511,7 +12511,7 @@ mod tests { let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = HashMap::new(); + let mut forward_htlcs = new_hash_map(); forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); forward_htlcs.insert(scid_2, dummy_htlcs_2.clone()); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e53fd6b0c..26f647211 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -618,7 +618,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { // Before using all the new monitors to check the watch outpoints, use the full set of // them to ensure we can write and reload our ChannelManager. { - let mut channel_monitors = HashMap::new(); + let mut channel_monitors = new_hash_map(); for monitor in deserialized_monitors.iter_mut() { channel_monitors.insert(monitor.get_funding_txo().0, monitor); } @@ -1049,7 +1049,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User let mut node_read = &chanman_encoded[..]; let (_, node_deserialized) = { - let mut channel_monitors = HashMap::new(); + let mut channel_monitors = new_hash_map(); for monitor in monitors_read.iter_mut() { assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none()); } @@ -3104,7 +3104,7 @@ pub enum HTLCType { NONE, TIMEOUT, SUCCESS } /// also fail. pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option, has_htlc_tx: HTLCType) -> Vec { let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap(); - let mut txn_seen = HashSet::new(); + let mut txn_seen = new_hash_set(); node_txn.retain(|tx| txn_seen.insert(tx.txid())); assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 }); @@ -3169,7 +3169,7 @@ pub fn test_revoked_htlc_claim_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c> pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec) -> Vec { let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap(); - let mut txn_seen = HashSet::new(); + let mut txn_seen = new_hash_set(); node_txn.retain(|tx| txn_seen.insert(tx.txid())); let mut found_prev = false; @@ -3269,7 +3269,7 @@ macro_rules! get_channel_value_stat { macro_rules! 
get_chan_reestablish_msgs { ($src_node: expr, $dst_node: expr) => { { - let mut announcements = $crate::prelude::HashSet::new(); + let mut announcements = $crate::prelude::new_hash_set(); let mut res = Vec::with_capacity(1); for msg in $src_node.node.get_and_clear_pending_msg_events() { if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5f9891031..821fd1f78 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3323,7 +3323,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // block connection just like the !deliver_bs_raa case } - let mut failed_htlcs = HashSet::new(); + let mut failed_htlcs = new_hash_set(); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); mine_transaction(&nodes[1], &revoked_local_txn[0]); @@ -5402,7 +5402,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let as_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 }); - let mut as_failds = HashSet::new(); + let mut as_failds = new_hash_set(); let mut as_updates = 0; for event in as_events.iter() { if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { @@ -5428,7 +5428,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let bs_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 }); - let mut bs_failds = HashSet::new(); + let mut bs_failds = new_hash_set(); let mut bs_updates = 0; for event in bs_events.iter() { if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { @@ -7340,7 +7340,7 @@ fn test_announce_disable_channels() { } let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let mut chans_disabled = HashMap::new(); + let mut chans_disabled = new_hash_map(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 6a56c3cf8..aaa4a0f66 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2572,7 +2572,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() { check_spends!(revoked_htlc_claim, htlc_tx); } - let mut revoked_claim_transaction_map = HashMap::new(); + let mut revoked_claim_transaction_map = new_hash_map(); for current_tx in txn.into_iter() { revoked_claim_transaction_map.insert(current_tx.txid(), current_tx); } diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index db8b43e3f..c260bac85 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -171,7 +171,7 @@ impl PendingOutboundPayment { } fn mark_fulfilled(&mut self) { - let mut session_privs = HashSet::new(); + let mut session_privs = new_hash_set(); core::mem::swap(&mut session_privs, match self { PendingOutboundPayment::Legacy { session_privs } | PendingOutboundPayment::Retryable { session_privs, .. } | @@ -186,7 +186,7 @@ impl PendingOutboundPayment { fn mark_abandoned(&mut self, reason: PaymentFailureReason) { if let PendingOutboundPayment::Retryable { session_privs, payment_hash, .. 
} = self { - let mut our_session_privs = HashSet::new(); + let mut our_session_privs = new_hash_set(); core::mem::swap(&mut our_session_privs, session_privs); *self = PendingOutboundPayment::Abandoned { session_privs: our_session_privs, @@ -195,7 +195,7 @@ impl PendingOutboundPayment { }; } else if let PendingOutboundPayment::InvoiceReceived { payment_hash, .. } = self { *self = PendingOutboundPayment::Abandoned { - session_privs: HashSet::new(), + session_privs: new_hash_set(), payment_hash: *payment_hash, reason: Some(reason) }; @@ -675,7 +675,7 @@ pub(super) struct OutboundPayments { impl OutboundPayments { pub(super) fn new() -> Self { Self { - pending_outbound_payments: Mutex::new(HashMap::new()), + pending_outbound_payments: Mutex::new(new_hash_map()), retry_lock: Mutex::new(()), } } @@ -1268,7 +1268,7 @@ impl OutboundPayments { retry_strategy, attempts: PaymentAttempts::new(), payment_params, - session_privs: HashSet::new(), + session_privs: new_hash_set(), pending_amt_msat: 0, pending_fee_msat: Some(0), payment_hash, diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 3e07f50b2..b062a9a88 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -944,8 +944,8 @@ impl HashMap> { let mut message_recipients = self.message_recipients.lock().unwrap(); - let mut msgs = HashMap::new(); + let mut msgs = new_hash_map(); // We don't want to disconnect the peers by removing them entirely from the original map, so we // release the pending message buffers individually. for (node_id, recipient) in &mut *message_recipients { diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 9c8fd40af..801f6bfa7 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1353,8 +1353,8 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { channels: RwLock::new(channels), nodes: RwLock::new(nodes), last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp), - removed_nodes: Mutex::new(HashMap::new()), - removed_channels: Mutex::new(HashMap::new()), + removed_nodes: Mutex::new(new_hash_map()), + removed_channels: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), }) } @@ -1398,8 +1398,8 @@ impl NetworkGraph where L::Target: Logger { channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), - removed_channels: Mutex::new(HashMap::new()), - removed_nodes: Mutex::new(HashMap::new()), + removed_channels: Mutex::new(new_hash_map()), + removed_nodes: Mutex::new(new_hash_map()), pending_checks: utxo::PendingChecks::new(), } } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 1bf90883e..4214a1821 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -277,7 +277,7 @@ pub struct InFlightHtlcs( impl InFlightHtlcs { /// Constructs an empty `InFlightHtlcs`. - pub fn new() -> Self { InFlightHtlcs(HashMap::new()) } + pub fn new() -> Self { InFlightHtlcs(new_hash_map()) } /// Takes in a path with payer's node id and adds the path's details to `InFlightHtlcs`. pub fn process_path(&mut self, path: &Path, payer_node_id: PublicKey) { @@ -1962,7 +1962,7 @@ where L::Target: Logger { // inserting first hops suggested by the caller as targets. // Our search will then attempt to reach them while traversing from the payee node. 
let mut first_hop_targets: HashMap<_, Vec<&ChannelDetails>> = - HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 }); + hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 }); if let Some(hops) = first_hops { for chan in hops { if chan.get_outbound_payment_scid().is_none() { @@ -1981,7 +1981,7 @@ where L::Target: Logger { } } - let mut private_hop_key_cache = HashMap::with_capacity( + let mut private_hop_key_cache = hash_map_with_capacity( payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum() ); @@ -2002,7 +2002,7 @@ where L::Target: Logger { // Map from node_id to information about the best current path to that node, including feerate // information. - let mut dist: HashMap = HashMap::with_capacity(network_nodes.len()); + let mut dist: HashMap = hash_map_with_capacity(network_nodes.len()); // During routing, if we ignore a path due to an htlc_minimum_msat limit, we set this, // indicating that we may wish to try again with a higher value, potentially paying to meet an @@ -2043,7 +2043,7 @@ where L::Target: Logger { // is used. Hence, liquidity used in one direction will not offset any used in the opposite // direction. let mut used_liquidities: HashMap = - HashMap::with_capacity(network_nodes.len()); + hash_map_with_capacity(network_nodes.len()); // Keeping track of how much value we already collected across other paths. Helps to decide // when we want to stop looking for new paths. diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 8c36bd100..ec4921812 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -653,7 +653,7 @@ impl Default for ProbabilisticScoringFeeParameters { base_penalty_amount_multiplier_msat: 8192, liquidity_penalty_multiplier_msat: 30_000, liquidity_penalty_amount_multiplier_msat: 192, - manual_node_penalties: HashMap::new(), + manual_node_penalties: new_hash_map(), anti_probing_penalty_msat: 250, considered_impossible_penalty_msat: 1_0000_0000_000, historical_liquidity_penalty_multiplier_msat: 10_000, @@ -695,7 +695,7 @@ impl ProbabilisticScoringFeeParameters { /// Clears the list of manual penalties that are applied during path finding. 
pub fn clear_manual_penalties(&mut self) { - self.manual_node_penalties = HashMap::new(); + self.manual_node_penalties = new_hash_map(); } } @@ -709,7 +709,7 @@ impl ProbabilisticScoringFeeParameters { liquidity_penalty_amount_multiplier_msat: 0, historical_liquidity_penalty_multiplier_msat: 0, historical_liquidity_penalty_amount_multiplier_msat: 0, - manual_node_penalties: HashMap::new(), + manual_node_penalties: new_hash_map(), anti_probing_penalty_msat: 0, considered_impossible_penalty_msat: 0, linear_success_probability: true, @@ -819,7 +819,7 @@ impl>, L: Deref> ProbabilisticScorer whe decay_params, network_graph, logger, - channel_liquidities: HashMap::new(), + channel_liquidities: new_hash_map(), } } @@ -2073,7 +2073,7 @@ ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScore r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L) ) -> Result { let (decay_params, network_graph, logger) = args; - let mut channel_liquidities = HashMap::new(); + let mut channel_liquidities = new_hash_map(); read_tlv_fields!(r, { (0, channel_liquidities, required), }); diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index e190d4258..ada90345e 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs @@ -307,7 +307,7 @@ pub(super) struct PendingChecks { impl PendingChecks { pub(super) fn new() -> Self { PendingChecks { internal: Mutex::new(PendingChecksContext { - channels: HashMap::new(), nodes: HashMap::new(), + channels: new_hash_map(), nodes: new_hash_map(), }) } } diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index e2a89d848..a7237493b 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -350,7 +350,7 @@ impl SpendableOutputDescriptor { let mut input = Vec::with_capacity(descriptors.len()); let mut input_value = 0; let mut witness_weight = 0; - let mut output_set = HashSet::with_capacity(descriptors.len()); + let mut output_set = hash_set_with_capacity(descriptors.len()); for outp in descriptors { match outp { SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => { diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs index b9f015af6..2b75e0953 100644 --- a/lightning/src/sync/debug_sync.rs +++ b/lightning/src/sync/debug_sync.rs @@ -14,7 +14,7 @@ use std::sync::Condvar as StdCondvar; pub use std::sync::WaitTimeoutResult; -use crate::prelude::HashMap; +use crate::prelude::*; use super::{LockTestExt, LockHeldState}; @@ -57,7 +57,7 @@ impl Condvar { thread_local! 
{ /// We track the set of locks currently held by a reference to their `LockMetadata` - static LOCKS_HELD: RefCell>> = RefCell::new(HashMap::new()); + static LOCKS_HELD: RefCell>> = RefCell::new(new_hash_map()); } static LOCK_IDX: AtomicUsize = AtomicUsize::new(0); @@ -113,7 +113,7 @@ impl LockMetadata { let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64; let res = Arc::new(LockMetadata { - locked_before: StdMutex::new(HashMap::new()), + locked_before: StdMutex::new(new_hash_map()), lock_idx, _lock_construction_bt: backtrace, }); @@ -122,7 +122,7 @@ impl LockMetadata { { let (lock_constr_location, lock_constr_colno) = locate_call_symbol(&res._lock_construction_bt); - LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } }); + LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); } }); let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap(); match locks.entry(lock_constr_location) { hash_map::Entry::Occupied(e) => { diff --git a/lightning/src/util/indexed_map.rs b/lightning/src/util/indexed_map.rs index 39565f048..d4c20f722 100644 --- a/lightning/src/util/indexed_map.rs +++ b/lightning/src/util/indexed_map.rs @@ -1,6 +1,6 @@ //! This module has a map which can be iterated in a deterministic order. See the [`IndexedMap`]. -use crate::prelude::{HashMap, hash_map}; +use crate::prelude::*; use alloc::vec::Vec; use alloc::slice::Iter; use core::hash::Hash; @@ -34,7 +34,7 @@ impl IndexedMap { /// Constructs a new, empty map pub fn new() -> Self { Self { - map: HashMap::new(), + map: new_hash_map(), keys: Vec::new(), } } @@ -42,7 +42,7 @@ impl IndexedMap { /// Constructs a new, empty map with the given capacity pre-allocated pub fn with_capacity(capacity: usize) -> Self { Self { - map: HashMap::with_capacity(capacity), + map: hash_map_with_capacity(capacity), keys: Vec::with_capacity(capacity), } } @@ -176,10 +176,7 @@ impl<'a, K: Hash + Ord, V: 'a> Iterator for Range<'a, K, V> { /// /// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly. pub struct VacantEntry<'a, K: Hash + Ord, V> { - #[cfg(feature = "hashbrown")] - underlying_entry: hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>, - #[cfg(not(feature = "hashbrown"))] - underlying_entry: hash_map::VacantEntry<'a, K, V>, + underlying_entry: VacantHashMapEntry<'a, K, V>, key: K, keys: &'a mut Vec, } @@ -188,10 +185,7 @@ pub struct VacantEntry<'a, K: Hash + Ord, V> { /// /// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly. pub struct OccupiedEntry<'a, K: Hash + Ord, V> { - #[cfg(feature = "hashbrown")] - underlying_entry: hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>, - #[cfg(not(feature = "hashbrown"))] - underlying_entry: hash_map::OccupiedEntry<'a, K, V>, + underlying_entry: OccupiedHashMapEntry<'a, K, V>, keys: &'a mut Vec, } diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 484d60340..c24a99f8f 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -749,7 +749,7 @@ macro_rules! 
impl_for_map { } impl_for_map!(BTreeMap, Ord, |_| BTreeMap::new()); -impl_for_map!(HashMap, Hash, |len| HashMap::with_capacity(len)); +impl_for_map!(HashMap, Hash, |len| hash_map_with_capacity(len)); // HashSet impl Writeable for HashSet @@ -771,7 +771,7 @@ where T: Readable + Eq + Hash #[inline] fn read(r: &mut R) -> Result { let len: CollectionLength = Readable::read(r)?; - let mut ret = HashSet::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::())); + let mut ret = hash_set_with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::())); for _ in 0..len.0 { if !ret.insert(T::read(r)?) { return Err(DecodeError::InvalidValue) diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index a96711d14..bd415e42b 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -318,8 +318,8 @@ impl<'a> TestChainMonitor<'a> { pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist, keys_manager: &'a TestKeysInterface) -> Self { Self { added_monitors: Mutex::new(Vec::new()), - monitor_updates: Mutex::new(HashMap::new()), - latest_monitor_update_id: Mutex::new(HashMap::new()), + monitor_updates: Mutex::new(new_hash_map()), + latest_monitor_update_id: Mutex::new(new_hash_map()), chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister), keys_manager, expect_channel_force_closed: Mutex::new(None), @@ -414,8 +414,8 @@ impl WatchtowerPersister { pub(crate) fn new(destination_script: ScriptBuf) -> Self { WatchtowerPersister { persister: TestPersister::new(), - unsigned_justice_tx_data: Mutex::new(HashMap::new()), - watchtower_state: Mutex::new(HashMap::new()), + unsigned_justice_tx_data: Mutex::new(new_hash_map()), + watchtower_state: Mutex::new(new_hash_map()), destination_script, } } @@ -448,7 +448,7 @@ impl chainmonitor::Persist Self { Self { update_rets: Mutex::new(VecDeque::new()), - chain_sync_monitor_persistences: Mutex::new(HashMap::new()), - offchain_monitor_updates: Mutex::new(HashMap::new()), + chain_sync_monitor_persistences: Mutex::new(new_hash_map()), + offchain_monitor_updates: Mutex::new(new_hash_map()), } } @@ -534,9 +534,9 @@ impl chainmonitor::Persist Self { - let persisted_bytes = Mutex::new(HashMap::new()); + let persisted_bytes = Mutex::new(new_hash_map()); Self { persisted_bytes, read_only } } } @@ -589,7 +589,7 @@ impl KVStore for TestStore { } else { format!("{}/{}", primary_namespace, secondary_namespace) }; - let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new()); + let outer_e = persisted_lock.entry(prefixed).or_insert(new_hash_map()); let mut bytes = Vec::new(); bytes.write_all(buf)?; outer_e.insert(key.to_string(), bytes); @@ -656,7 +656,7 @@ impl TestBroadcaster { pub fn unique_txn_broadcast(&self) -> Vec { let mut txn = self.txn_broadcasted.lock().unwrap().split_off(0); - let mut seen = HashSet::new(); + let mut seen = new_hash_set(); txn.retain(|tx| seen.insert(tx.txid())); txn } @@ -693,7 +693,7 @@ impl TestChannelMessageHandler { TestChannelMessageHandler { pending_events: Mutex::new(Vec::new()), expected_recv_msgs: Mutex::new(None), - connected_peers: Mutex::new(HashSet::new()), + connected_peers: Mutex::new(new_hash_set()), message_fetch_counter: AtomicUsize::new(0), chain_hash, } @@ -1045,8 +1045,8 @@ impl TestLogger { TestLogger { level: Level::Trace, id, - 
lines: Mutex::new(HashMap::new()), - context: Mutex::new(HashMap::new()), + lines: Mutex::new(new_hash_map()), + context: Mutex::new(new_hash_map()), } } pub fn enable(&mut self, level: Level) { @@ -1259,7 +1259,7 @@ impl TestKeysInterface { backing: sign::PhantomKeysManager::new(seed, now.as_secs(), now.subsec_nanos(), seed), override_random_bytes: Mutex::new(None), disable_revocation_policy_check: false, - enforcement_states: Mutex::new(HashMap::new()), + enforcement_states: Mutex::new(new_hash_map()), expectations: Mutex::new(None), } } @@ -1340,8 +1340,8 @@ impl TestChainSource { chain_hash: ChainHash::using_genesis_block(network), utxo_ret: Mutex::new(UtxoResult::Sync(Ok(TxOut { value: u64::max_value(), script_pubkey }))), get_utxo_call_count: AtomicUsize::new(0), - watched_txn: Mutex::new(HashSet::new()), - watched_outputs: Mutex::new(HashSet::new()), + watched_txn: Mutex::new(new_hash_set()), + watched_outputs: Mutex::new(new_hash_set()), } } }
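
As a minimal, std-only sketch of the wrapper pattern this commit introduces (the constructor
names mirror the `lightning::prelude` hunk above, while the `main` body and its key/value types
are invented purely for illustration), the indirection means a later hasher or table swap only
needs to touch these four functions rather than every construction site:

// Simplified, std-only sketch of the wrapper pattern; the real crate additionally
// switches between std and `hashbrown` tables via #[cfg(feature = "hashbrown")].
use std::collections::{HashMap, HashSet};

pub fn new_hash_map<K, V>() -> HashMap<K, V> { HashMap::new() }
pub fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
	HashMap::with_capacity(cap)
}
pub fn new_hash_set<K>() -> HashSet<K> { HashSet::new() }
pub fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
	HashSet::with_capacity(cap)
}

fn main() {
	// Call sites never name `HashMap::new()` directly, so only the wrappers
	// need changing once the backing hasher stops being randomized by default.
	let mut outputs_to_watch: HashMap<u32, Vec<u64>> = new_hash_map();
	outputs_to_watch.insert(0, vec![1, 2, 3]);

	let mut txn_seen: HashSet<u64> = hash_set_with_capacity(8);
	assert!(txn_seen.insert(42));
}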