/// transactions relevant to the watched channels.
pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
Self {
- monitors: RwLock::new(HashMap::new()),
+ monitors: RwLock::new(new_hash_map()),
sync_persistence_id: AtomicCounter::new(),
chain_source,
broadcaster,
channel_parameters.clone(), initial_holder_commitment_tx, secp_ctx
);
- let mut outputs_to_watch = HashMap::new();
+ let mut outputs_to_watch = new_hash_map();
outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
Self::from_impl(ChannelMonitorImpl {
on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
- counterparty_claimable_outpoints: HashMap::new(),
- counterparty_commitment_txn_on_chain: HashMap::new(),
- counterparty_hash_commitment_number: HashMap::new(),
- counterparty_fulfilled_htlcs: HashMap::new(),
+ counterparty_claimable_outpoints: new_hash_map(),
+ counterparty_commitment_txn_on_chain: new_hash_map(),
+ counterparty_hash_commitment_number: new_hash_map(),
+ counterparty_fulfilled_htlcs: new_hash_map(),
prev_holder_signed_commitment_tx: None,
current_holder_commitment_tx: holder_commitment_tx,
current_counterparty_commitment_number: 1 << 48,
current_holder_commitment_number,
- payment_preimages: HashMap::new(),
+ payment_preimages: new_hash_map(),
pending_monitor_events: Vec::new(),
pending_events: Vec::new(),
is_processing_pending_events: false,
/// HTLCs which were resolved on-chain (i.e. where the final HTLC resolution was done by an
/// event from this `ChannelMonitor`).
pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
- let mut res = HashMap::new();
+ let mut res = new_hash_map();
// Just examine the available counterparty commitment transactions. See docs on
// `fail_unbroadcast_htlcs`, below, for justification.
let us = self.inner.lock().unwrap();
return self.get_all_current_outbound_htlcs();
}
- let mut res = HashMap::new();
+ let mut res = new_hash_map();
macro_rules! walk_htlcs {
($holder_commitment: expr, $htlc_iter: expr) => {
for (htlc, source) in $htlc_iter {
/// Filters a block's `txdata` for transactions spending watched outputs or for any child
/// transactions thereof.
fn filter_block<'a>(&self, txdata: &TransactionData<'a>) -> Vec<&'a Transaction> {
- let mut matched_txn = HashSet::new();
+ let mut matched_txn = new_hash_set();
txdata.iter().filter(|&&(_, tx)| {
let mut matches = self.spends_watched_output(tx);
for input in tx.input.iter() {
}
let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut counterparty_claimable_outpoints = HashMap::with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
+ let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
for _ in 0..counterparty_claimable_outpoints_len {
let txid: Txid = Readable::read(reader)?;
let htlcs_count: u64 = Readable::read(reader)?;
}
let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
- let mut counterparty_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..counterparty_commitment_txn_on_chain_len {
let txid: Txid = Readable::read(reader)?;
let commitment_number = <U48 as Readable>::read(reader)?.0;
}
let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?;
- let mut counterparty_hash_commitment_number = HashMap::with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut counterparty_hash_commitment_number = hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..counterparty_hash_commitment_number_len {
let payment_hash: PaymentHash = Readable::read(reader)?;
let commitment_number = <U48 as Readable>::read(reader)?.0;
let current_holder_commitment_number = <U48 as Readable>::read(reader)?.0;
let payment_preimages_len: u64 = Readable::read(reader)?;
- let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..payment_preimages_len {
let preimage: PaymentPreimage = Readable::read(reader)?;
let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
}
let outputs_to_watch_len: u64 = Readable::read(reader)?;
- let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
+ let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
for _ in 0..outputs_to_watch_len {
let txid = Readable::read(reader)?;
let outputs_len: u64 = Readable::read(reader)?;
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
let mut spendable_txids_confirmed = Some(Vec::new());
- let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
+ let mut counterparty_fulfilled_htlcs = Some(new_hash_map());
let mut initial_counterparty_commitment_info = None;
let mut channel_id = None;
read_tlv_fields!(reader, {
signer.provide_channel_parameters(&channel_parameters);
let pending_claim_requests_len: u64 = Readable::read(reader)?;
- let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
+ let mut pending_claim_requests = hash_map_with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..pending_claim_requests_len {
pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
}
let claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
+ let mut claimable_outpoints = hash_map_with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..claimable_outpoints_len {
let outpoint = Readable::read(reader)?;
let ancestor_claim_txid = Readable::read(reader)?;
prev_holder_commitment: None,
signer,
channel_transaction_parameters: channel_parameters,
- pending_claim_requests: HashMap::new(),
- claimable_outpoints: HashMap::new(),
+ pending_claim_requests: new_hash_map(),
+ claimable_outpoints: new_hash_map(),
locktimed_packages: BTreeMap::new(),
onchain_events_awaiting_threshold_conf: Vec::new(),
pending_claim_events: Vec::new(),
F::Target: FeeEstimator,
{
log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
- let mut bump_candidates = HashMap::new();
+ let mut bump_candidates = new_hash_map();
for tx in txn_matched {
// Scan all inputs to check whether any of the spent outpoints is of interest to us
let mut claimed_outputs_material = Vec::new();
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{
- let mut bump_candidates = HashMap::new();
+ let mut bump_candidates = new_hash_map();
let onchain_events_awaiting_threshold_conf =
self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
for entry in onchain_events_awaiting_threshold_conf {
/// Returns a new instance backed by the given [`WalletSource`] that serves as an implementation
/// of [`CoinSelectionSource`].
pub fn new(source: W, logger: L) -> Self {
- Self { source, logger, locked_utxos: Mutex::new(HashMap::new()) }
+ Self { source, logger, locked_utxos: Mutex::new(new_hash_map()) }
}
/// Performs coin selection on the set of UTXOs obtained from
extern crate hashbrown;
pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box};
+
+ #[cfg(not(feature = "hashbrown"))]
+ mod std_hashtables {
+ pub(crate) use std::collections::{HashMap, HashSet, hash_map};
+
+ pub(crate) type OccupiedHashMapEntry<'a, K, V> =
+ std::collections::hash_map::OccupiedEntry<'a, K, V>;
+ pub(crate) type VacantHashMapEntry<'a, K, V> =
+ std::collections::hash_map::VacantEntry<'a, K, V>;
+ }
#[cfg(not(feature = "hashbrown"))]
- pub use std::collections::{HashMap, HashSet, hash_map};
+ pub(crate) use std_hashtables::*;
+
+ #[cfg(feature = "hashbrown")]
+ mod hashbrown_tables {
+ pub(crate) use hashbrown::{HashMap, HashSet, hash_map};
+
+ pub(crate) type OccupiedHashMapEntry<'a, K, V> =
+ hashbrown::hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>;
+ pub(crate) type VacantHashMapEntry<'a, K, V> =
+ hashbrown::hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>;
+ }
#[cfg(feature = "hashbrown")]
- pub use self::hashbrown::{HashMap, HashSet, hash_map};
+ pub(crate) use hashbrown_tables::*;
+
+ pub(crate) fn new_hash_map<K: core::hash::Hash + Eq, V>() -> HashMap<K, V> { HashMap::new() }
+ pub(crate) fn hash_map_with_capacity<K: core::hash::Hash + Eq, V>(cap: usize) -> HashMap<K, V> {
+ HashMap::with_capacity(cap)
+ }
+
+ pub(crate) fn new_hash_set<K: core::hash::Hash + Eq>() -> HashSet<K> { HashSet::new() }
+ pub(crate) fn hash_set_with_capacity<K: core::hash::Hash + Eq>(cap: usize) -> HashSet<K> {
+ HashSet::with_capacity(cap)
+ }
pub use alloc::borrow::ToOwned;
pub use alloc::string::ToString;
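// A minimal usage sketch, with an illustrative function name that is not part of
// the surrounding prelude: call sites construct tables through the helpers above,
// staying agnostic of whether `std::collections` or `hashbrown` backs the map, so
// a different hasher could later be swapped in at this single point.
fn _hash_table_usage_sketch() {
	let mut counts: HashMap<&'static str, usize> = new_hash_map();
	*counts.entry("a").or_insert(0) += 1;

	// The entry type aliases above resolve to whichever backend's entry types apply.
	match counts.entry("b") {
		hash_map::Entry::Occupied(_) => {},
		hash_map::Entry::Vacant(e) => { e.insert(0); },
	}

	let mut seen: HashSet<u32> = hash_set_with_capacity(8);
	assert!(seen.insert(42));
}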
channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
+ historical_inbound_htlc_fulfills: new_hash_set(),
channel_type,
channel_keys_id,
channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
+ historical_inbound_htlc_fulfills: new_hash_set(),
channel_type,
channel_keys_id,
let channel_update_status = Readable::read(reader)?;
#[cfg(any(test, fuzzing))]
- let mut historical_inbound_htlc_fulfills = HashSet::new();
+ let mut historical_inbound_htlc_fulfills = new_hash_set();
#[cfg(any(test, fuzzing))]
{
let htlc_fulfills_len: u64 = Readable::read(reader)?;
best_block: RwLock::new(params.best_block),
- outbound_scid_aliases: Mutex::new(HashSet::new()),
- pending_inbound_payments: Mutex::new(HashMap::new()),
+ outbound_scid_aliases: Mutex::new(new_hash_set()),
+ pending_inbound_payments: Mutex::new(new_hash_map()),
pending_outbound_payments: OutboundPayments::new(),
- forward_htlcs: Mutex::new(HashMap::new()),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
- pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- outpoint_to_peer: Mutex::new(HashMap::new()),
- short_to_chan_info: FairRwLock::new(HashMap::new()),
+ forward_htlcs: Mutex::new(new_hash_map()),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
+ pending_intercepted_htlcs: Mutex::new(new_hash_map()),
+ outpoint_to_peer: Mutex::new(new_hash_map()),
+ short_to_chan_info: FairRwLock::new(new_hash_map()),
our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
secp_ctx,
highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
- per_peer_state: FairRwLock::new(HashMap::new()),
+ per_peer_state: FairRwLock::new(new_hash_map()),
pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
ProbeSendFailure::RouteNotFound
})?;
- let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
+ let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
let mut res = Vec::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
return NotifyOption::SkipPersistNoEvents;
}
e.insert(Mutex::new(PeerState {
- channel_by_id: HashMap::new(),
- inbound_channel_request_by_id: HashMap::new(),
+ channel_by_id: new_hash_map(),
+ inbound_channel_request_by_id: new_hash_map(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
}
// Encode without retry info for 0.0.101 compatibility.
- let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+ let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
for (id, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
if !updates.is_empty() {
- if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+ if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
}
}
let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
- let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
- let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
+ let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
},
hash_map::Entry::Vacant(entry) => {
- let mut by_id_map = HashMap::new();
+ let mut by_id_map = new_hash_map();
by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
entry.insert(by_id_map);
}
const MAX_ALLOC_SIZE: usize = 1024 * 64;
let forward_htlcs_count: u64 = Readable::read(reader)?;
- let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+ let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
for _ in 0..forward_htlcs_count {
let short_channel_id = Readable::read(reader)?;
let pending_forwards_count: u64 = Readable::read(reader)?;
let peer_state_from_chans = |channel_by_id| {
PeerState {
channel_by_id,
- inbound_channel_request_by_id: HashMap::new(),
+ inbound_channel_request_by_id: new_hash_map(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
};
let peer_count: u64 = Readable::read(reader)?;
- let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
+ let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
for _ in 0..peer_count {
let peer_pubkey = Readable::read(reader)?;
- let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+ let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
let mut peer_state = peer_state_from_chans(peer_chans);
peer_state.latest_features = Readable::read(reader)?;
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
let highest_seen_timestamp: u32 = Readable::read(reader)?;
let pending_inbound_payment_count: u64 = Readable::read(reader)?;
- let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+ let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
for _ in 0..pending_inbound_payment_count {
if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
return Err(DecodeError::InvalidValue);
let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
- HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+ hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
for _ in 0..pending_outbound_payments_count_compat {
let session_priv = Readable::read(reader)?;
let payment = PendingOutboundPayment::Legacy {
// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
let mut pending_outbound_payments = None;
- let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
+ let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
let mut received_network_pubkey: Option<PublicKey> = None;
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut claimable_htlc_onion_fields = None;
- let mut pending_claiming_payments = Some(HashMap::new());
+ let mut pending_claiming_payments = Some(new_hash_map());
let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
let mut events_override = None;
let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
- let mut outbounds = HashMap::new();
+ let mut outbounds = new_hash_map();
for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
// still open, we need to replay any monitor updates that are for closed channels,
// creating the necessary peer_state entries as we go.
let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
- Mutex::new(peer_state_from_chans(HashMap::new()))
+ Mutex::new(peer_state_from_chans(new_hash_map()))
});
let mut peer_state = peer_state_mutex.lock().unwrap();
handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
- let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+ let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
}
- let mut outbound_scid_aliases = HashSet::new();
+ let mut outbound_scid_aliases = new_hash_set();
for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (scid_1, scid_2) = (42, 43);
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
// Before using all the new monitors to check the watch outpoints, use the full set of
// them to ensure we can write and reload our ChannelManager.
{
- let mut channel_monitors = HashMap::new();
+ let mut channel_monitors = new_hash_map();
for monitor in deserialized_monitors.iter_mut() {
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
}
let mut node_read = &chanman_encoded[..];
let (_, node_deserialized) = {
- let mut channel_monitors = HashMap::new();
+ let mut channel_monitors = new_hash_map();
for monitor in monitors_read.iter_mut() {
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
}
/// also fail.
pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
- let mut txn_seen = HashSet::new();
+ let mut txn_seen = new_hash_set();
node_txn.retain(|tx| txn_seen.insert(tx.txid()));
assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
- let mut txn_seen = HashSet::new();
+ let mut txn_seen = new_hash_set();
node_txn.retain(|tx| txn_seen.insert(tx.txid()));
let mut found_prev = false;
macro_rules! get_chan_reestablish_msgs {
($src_node: expr, $dst_node: expr) => {
{
- let mut announcements = $crate::prelude::HashSet::new();
+ let mut announcements = $crate::prelude::new_hash_set();
let mut res = Vec::with_capacity(1);
for msg in $src_node.node.get_and_clear_pending_msg_events() {
if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
// block connection just like the !deliver_bs_raa case
}
- let mut failed_htlcs = HashSet::new();
+ let mut failed_htlcs = new_hash_set();
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
mine_transaction(&nodes[1], &revoked_local_txn[0]);
let as_events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
- let mut as_failds = HashSet::new();
+ let mut as_failds = new_hash_set();
let mut as_updates = 0;
for event in as_events.iter() {
if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
let bs_events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
- let mut bs_failds = HashSet::new();
+ let mut bs_failds = new_hash_set();
let mut bs_updates = 0;
for event in bs_events.iter() {
if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
}
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- let mut chans_disabled = HashMap::new();
+ let mut chans_disabled = new_hash_map();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
check_spends!(revoked_htlc_claim, htlc_tx);
}
- let mut revoked_claim_transaction_map = HashMap::new();
+ let mut revoked_claim_transaction_map = new_hash_map();
for current_tx in txn.into_iter() {
revoked_claim_transaction_map.insert(current_tx.txid(), current_tx);
}
}
fn mark_fulfilled(&mut self) {
- let mut session_privs = HashSet::new();
+ let mut session_privs = new_hash_set();
core::mem::swap(&mut session_privs, match self {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } |
fn mark_abandoned(&mut self, reason: PaymentFailureReason) {
if let PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } = self {
- let mut our_session_privs = HashSet::new();
+ let mut our_session_privs = new_hash_set();
core::mem::swap(&mut our_session_privs, session_privs);
*self = PendingOutboundPayment::Abandoned {
session_privs: our_session_privs,
};
} else if let PendingOutboundPayment::InvoiceReceived { payment_hash, .. } = self {
*self = PendingOutboundPayment::Abandoned {
- session_privs: HashSet::new(),
+ session_privs: new_hash_set(),
payment_hash: *payment_hash,
reason: Some(reason)
};
impl OutboundPayments {
pub(super) fn new() -> Self {
Self {
- pending_outbound_payments: Mutex::new(HashMap::new()),
+ pending_outbound_payments: Mutex::new(new_hash_map()),
retry_lock: Mutex::new(()),
}
}
retry_strategy,
attempts: PaymentAttempts::new(),
payment_params,
- session_privs: HashSet::new(),
+ session_privs: new_hash_set(),
pending_amt_msat: 0,
pending_fee_msat: Some(0),
payment_hash,
PeerManager {
message_handler,
- peers: FairRwLock::new(HashMap::new()),
- node_id_to_descriptor: Mutex::new(HashMap::new()),
+ peers: FairRwLock::new(new_hash_map()),
+ node_id_to_descriptor: Mutex::new(new_hash_map()),
event_processing_state: AtomicI32::new(0),
ephemeral_key_midstate,
peer_counter: AtomicCounter::new(),
self.update_gossip_backlogged();
let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
- let mut peers_to_disconnect = HashMap::new();
+ let mut peers_to_disconnect = new_hash_map();
{
let peers_lock = self.peers.read().unwrap();
OnionMessenger {
entropy_source,
node_signer,
- message_recipients: Mutex::new(HashMap::new()),
+ message_recipients: Mutex::new(new_hash_map()),
secp_ctx,
logger,
message_router,
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<OnionMessage>> {
let mut message_recipients = self.message_recipients.lock().unwrap();
- let mut msgs = HashMap::new();
+ let mut msgs = new_hash_map();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// release the pending message buffers individually.
for (node_id, recipient) in &mut *message_recipients {
channels: RwLock::new(channels),
nodes: RwLock::new(nodes),
last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
- removed_nodes: Mutex::new(HashMap::new()),
- removed_channels: Mutex::new(HashMap::new()),
+ removed_nodes: Mutex::new(new_hash_map()),
+ removed_channels: Mutex::new(new_hash_map()),
pending_checks: utxo::PendingChecks::new(),
})
}
channels: RwLock::new(IndexedMap::new()),
nodes: RwLock::new(IndexedMap::new()),
last_rapid_gossip_sync_timestamp: Mutex::new(None),
- removed_channels: Mutex::new(HashMap::new()),
- removed_nodes: Mutex::new(HashMap::new()),
+ removed_channels: Mutex::new(new_hash_map()),
+ removed_nodes: Mutex::new(new_hash_map()),
pending_checks: utxo::PendingChecks::new(),
}
}
impl InFlightHtlcs {
/// Constructs an empty `InFlightHtlcs`.
- pub fn new() -> Self { InFlightHtlcs(HashMap::new()) }
+ pub fn new() -> Self { InFlightHtlcs(new_hash_map()) }
/// Takes in a path with payer's node id and adds the path's details to `InFlightHtlcs`.
pub fn process_path(&mut self, path: &Path, payer_node_id: PublicKey) {
// inserting first hops suggested by the caller as targets.
// Our search will then attempt to reach them while traversing from the payee node.
let mut first_hop_targets: HashMap<_, Vec<&ChannelDetails>> =
- HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
+ hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
if let Some(hops) = first_hops {
for chan in hops {
if chan.get_outbound_payment_scid().is_none() {
}
}
- let mut private_hop_key_cache = HashMap::with_capacity(
+ let mut private_hop_key_cache = hash_map_with_capacity(
payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum()
);
// Map from node_id to information about the best current path to that node, including feerate
// information.
- let mut dist: HashMap<NodeId, PathBuildingHop> = HashMap::with_capacity(network_nodes.len());
+ let mut dist: HashMap<NodeId, PathBuildingHop> = hash_map_with_capacity(network_nodes.len());
// During routing, if we ignore a path due to an htlc_minimum_msat limit, we set this,
// indicating that we may wish to try again with a higher value, potentially paying to meet an
// is used. Hence, liquidity used in one direction will not offset any used in the opposite
// direction.
let mut used_liquidities: HashMap<CandidateHopId, u64> =
- HashMap::with_capacity(network_nodes.len());
+ hash_map_with_capacity(network_nodes.len());
// Keeping track of how much value we already collected across other paths. Helps to decide
// when we want to stop looking for new paths.
base_penalty_amount_multiplier_msat: 8192,
liquidity_penalty_multiplier_msat: 30_000,
liquidity_penalty_amount_multiplier_msat: 192,
- manual_node_penalties: HashMap::new(),
+ manual_node_penalties: new_hash_map(),
anti_probing_penalty_msat: 250,
considered_impossible_penalty_msat: 1_0000_0000_000,
historical_liquidity_penalty_multiplier_msat: 10_000,
/// Clears the list of manual penalties that are applied during path finding.
pub fn clear_manual_penalties(&mut self) {
- self.manual_node_penalties = HashMap::new();
+ self.manual_node_penalties = new_hash_map();
}
}
liquidity_penalty_amount_multiplier_msat: 0,
historical_liquidity_penalty_multiplier_msat: 0,
historical_liquidity_penalty_amount_multiplier_msat: 0,
- manual_node_penalties: HashMap::new(),
+ manual_node_penalties: new_hash_map(),
anti_probing_penalty_msat: 0,
considered_impossible_penalty_msat: 0,
linear_success_probability: true,
decay_params,
network_graph,
logger,
- channel_liquidities: HashMap::new(),
+ channel_liquidities: new_hash_map(),
}
}
r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
) -> Result<Self, DecodeError> {
let (decay_params, network_graph, logger) = args;
- let mut channel_liquidities = HashMap::new();
+ let mut channel_liquidities = new_hash_map();
read_tlv_fields!(r, {
(0, channel_liquidities, required),
});
impl PendingChecks {
pub(super) fn new() -> Self {
PendingChecks { internal: Mutex::new(PendingChecksContext {
- channels: HashMap::new(), nodes: HashMap::new(),
+ channels: new_hash_map(), nodes: new_hash_map(),
}) }
}
let mut input = Vec::with_capacity(descriptors.len());
let mut input_value = 0;
let mut witness_weight = 0;
- let mut output_set = HashSet::with_capacity(descriptors.len());
+ let mut output_set = hash_set_with_capacity(descriptors.len());
for outp in descriptors {
match outp {
SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
pub use std::sync::WaitTimeoutResult;
-use crate::prelude::HashMap;
+use crate::prelude::*;
use super::{LockTestExt, LockHeldState};
thread_local! {
/// We track the set of locks currently held by a reference to their `LockMetadata`
- static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
+ static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(new_hash_map());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;
let res = Arc::new(LockMetadata {
- locked_before: StdMutex::new(HashMap::new()),
+ locked_before: StdMutex::new(new_hash_map()),
lock_idx,
_lock_construction_bt: backtrace,
});
{
let (lock_constr_location, lock_constr_colno) =
locate_call_symbol(&res._lock_construction_bt);
- LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
+ LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); } });
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
match locks.entry(lock_constr_location) {
hash_map::Entry::Occupied(e) => {
//! This module has a map which can be iterated in a deterministic order. See the [`IndexedMap`].
-use crate::prelude::{HashMap, hash_map};
+use crate::prelude::*;
use alloc::vec::Vec;
use alloc::slice::Iter;
use core::hash::Hash;
/// Constructs a new, empty map
pub fn new() -> Self {
Self {
- map: HashMap::new(),
+ map: new_hash_map(),
keys: Vec::new(),
}
}
/// Constructs a new, empty map with the given capacity pre-allocated
pub fn with_capacity(capacity: usize) -> Self {
Self {
- map: HashMap::with_capacity(capacity),
+ map: hash_map_with_capacity(capacity),
keys: Vec::with_capacity(capacity),
}
}
///
/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct VacantEntry<'a, K: Hash + Ord, V> {
- #[cfg(feature = "hashbrown")]
- underlying_entry: hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>,
- #[cfg(not(feature = "hashbrown"))]
- underlying_entry: hash_map::VacantEntry<'a, K, V>,
+ underlying_entry: VacantHashMapEntry<'a, K, V>,
key: K,
keys: &'a mut Vec<K>,
}
///
/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct OccupiedEntry<'a, K: Hash + Ord, V> {
- #[cfg(feature = "hashbrown")]
- underlying_entry: hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>,
- #[cfg(not(feature = "hashbrown"))]
- underlying_entry: hash_map::OccupiedEntry<'a, K, V>,
+ underlying_entry: OccupiedHashMapEntry<'a, K, V>,
keys: &'a mut Vec<K>,
}
}
impl_for_map!(BTreeMap, Ord, |_| BTreeMap::new());
-impl_for_map!(HashMap, Hash, |len| HashMap::with_capacity(len));
+impl_for_map!(HashMap, Hash, |len| hash_map_with_capacity(len));
// HashSet
impl<T> Writeable for HashSet<T>
#[inline]
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let len: CollectionLength = Readable::read(r)?;
- let mut ret = HashSet::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<T>()));
+ let mut ret = hash_set_with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<T>()));
for _ in 0..len.0 {
if !ret.insert(T::read(r)?) {
return Err(DecodeError::InvalidValue)
pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
- monitor_updates: Mutex::new(HashMap::new()),
- latest_monitor_update_id: Mutex::new(HashMap::new()),
+ monitor_updates: Mutex::new(new_hash_map()),
+ latest_monitor_update_id: Mutex::new(new_hash_map()),
chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister),
keys_manager,
expect_channel_force_closed: Mutex::new(None),
pub(crate) fn new(destination_script: ScriptBuf) -> Self {
WatchtowerPersister {
persister: TestPersister::new(),
- unsigned_justice_tx_data: Mutex::new(HashMap::new()),
- watchtower_state: Mutex::new(HashMap::new()),
+ unsigned_justice_tx_data: Mutex::new(new_hash_map()),
+ watchtower_state: Mutex::new(new_hash_map()),
destination_script,
}
}
assert!(self.unsigned_justice_tx_data.lock().unwrap()
.insert(funding_txo, VecDeque::new()).is_none());
assert!(self.watchtower_state.lock().unwrap()
- .insert(funding_txo, HashMap::new()).is_none());
+ .insert(funding_txo, new_hash_map()).is_none());
let initial_counterparty_commitment_tx = data.initial_counterparty_commitment_tx()
.expect("First and only call expects Some");
pub fn new() -> Self {
Self {
update_rets: Mutex::new(VecDeque::new()),
- chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
- offchain_monitor_updates: Mutex::new(HashMap::new()),
+ chain_sync_monitor_persistences: Mutex::new(new_hash_map()),
+ offchain_monitor_updates: Mutex::new(new_hash_map()),
}
}
}
let is_chain_sync = if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false };
if is_chain_sync {
- self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+ self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update_id);
} else {
- self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+ self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update_id);
}
ret
}
impl TestStore {
pub fn new(read_only: bool) -> Self {
- let persisted_bytes = Mutex::new(HashMap::new());
+ let persisted_bytes = Mutex::new(new_hash_map());
Self { persisted_bytes, read_only }
}
}
} else {
format!("{}/{}", primary_namespace, secondary_namespace)
};
- let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+ let outer_e = persisted_lock.entry(prefixed).or_insert(new_hash_map());
let mut bytes = Vec::new();
bytes.write_all(buf)?;
outer_e.insert(key.to_string(), bytes);
pub fn unique_txn_broadcast(&self) -> Vec<Transaction> {
let mut txn = self.txn_broadcasted.lock().unwrap().split_off(0);
- let mut seen = HashSet::new();
+ let mut seen = new_hash_set();
txn.retain(|tx| seen.insert(tx.txid()));
txn
}
TestChannelMessageHandler {
pending_events: Mutex::new(Vec::new()),
expected_recv_msgs: Mutex::new(None),
- connected_peers: Mutex::new(HashSet::new()),
+ connected_peers: Mutex::new(new_hash_set()),
message_fetch_counter: AtomicUsize::new(0),
chain_hash,
}
TestLogger {
level: Level::Trace,
id,
- lines: Mutex::new(HashMap::new()),
- context: Mutex::new(HashMap::new()),
+ lines: Mutex::new(new_hash_map()),
+ context: Mutex::new(new_hash_map()),
}
}
pub fn enable(&mut self, level: Level) {
backing: sign::PhantomKeysManager::new(seed, now.as_secs(), now.subsec_nanos(), seed),
override_random_bytes: Mutex::new(None),
disable_revocation_policy_check: false,
- enforcement_states: Mutex::new(HashMap::new()),
+ enforcement_states: Mutex::new(new_hash_map()),
expectations: Mutex::new(None),
}
}
chain_hash: ChainHash::using_genesis_block(network),
utxo_ret: Mutex::new(UtxoResult::Sync(Ok(TxOut { value: u64::max_value(), script_pubkey }))),
get_utxo_call_count: AtomicUsize::new(0),
- watched_txn: Mutex::new(HashSet::new()),
- watched_outputs: Mutex::new(HashSet::new()),
+ watched_txn: Mutex::new(new_hash_set()),
+ watched_outputs: Mutex::new(new_hash_set()),
}
}
}