run: |
cd lightning
RUSTFLAGS="--cfg=require_route_graph_test" cargo test
- RUSTFLAGS="--cfg=require_route_graph_test" cargo test --features hashbrown
+ RUSTFLAGS="--cfg=require_route_graph_test" cargo test --features hashbrown,ahash
cd ..
- name: Run benchmarks on Rust ${{ matrix.toolchain }}
run: |
harness = false
[features]
-hashbrown = ["lightning/hashbrown"]
+hashbrown = ["lightning/hashbrown", "lightning/ahash"]
[dependencies]
lightning = { path = "../lightning", features = ["_test_utils", "criterion"] }
pass
elif feature == "no-std":
pass
+ elif feature == "ahash":
+ pass
elif feature == "hashbrown":
pass
elif feature == "backtrace":
lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
bitcoin = { version = "0.30.2", features = ["secp-lowmemory"] }
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
-hashbrown = "0.8"
+hashbrown = "0.13"
afl = { version = "0.12", optional = true }
honggfuzz = { version = "0.5", optional = true, default-features = false }
lightning = { version = "0.0.121", path = "../lightning", default-features = false }
secp256k1 = { version = "0.27.0", default-features = false, features = ["recovery", "alloc"] }
num-traits = { version = "0.2.8", default-features = false }
-hashbrown = { version = "0.8", optional = true }
+hashbrown = { version = "0.13", optional = true }
serde = { version = "1.0.118", optional = true }
bitcoin = { version = "0.30.2", default-features = false }
# Override signing to not include randomness when generating signatures for test vectors.
_test_vectors = []
-no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc", "libm"]
+no-std = ["hashbrown", "ahash", "bitcoin/no-std", "core2/alloc", "libm"]
std = ["bitcoin/std"]
# Generates low-r bitcoin signatures, which saves 1 byte in 50% of the cases
[dependencies]
bitcoin = { version = "0.30.2", default-features = false, features = ["secp-recovery"] }
-hashbrown = { version = "0.8", optional = true }
+hashbrown = { version = "0.13", optional = true }
+ahash = { version = "0.8", optional = true, default-features = false }
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
regex = { version = "1.5.6", optional = true }
backtrace = { version = "0.3", optional = true }
core2 = { version = "0.3.0", optional = true, default-features = false }
libm = { version = "0.2", optional = true, default-features = false }
+# Because ahash no longer does this for us (and previously did it only roughly), list out the
+# targets that getrandom supports and turn on ahash's `runtime-rng` feature for them.
+[target.'cfg(not(any(target_os = "unknown", target_os = "none")))'.dependencies]
+ahash = { version = "0.8", optional = true, default-features = false, features = ["runtime-rng"] }
+
+# It is unclear what target_os gets set to for SGX, so to be safe always enable runtime-rng for
+# x86_64 platforms (assuming LDK isn't being used on embedded x86-64 running directly on metal).
+[target.'cfg(target_arch = "x86_64")'.dependencies]
+ahash = { version = "0.8", optional = true, default-features = false, features = ["runtime-rng"] }
+
[dev-dependencies]
regex = "1.5.6"
if let Some(claim_id) = claim_id {
if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
for outpoint in claim.outpoints() {
- self.claimable_outpoints.remove(&outpoint);
+ self.claimable_outpoints.remove(outpoint);
}
}
} else {
mod prelude {
#[cfg(feature = "hashbrown")]
extern crate hashbrown;
+ #[cfg(feature = "ahash")]
+ extern crate ahash;
pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box};
+ pub use alloc::borrow::ToOwned;
+ pub use alloc::string::ToString;
+
+ // For no-std builds we need to use hashbrown; however, by default it does not randomize its
+ // hashing and is therefore vulnerable to HashDoS attacks. Thus, when not fuzzing, we use its
+ // default ahash hashing algorithm with proper seed randomization, opting out of randomization
+ // when fuzzing to avoid false-positive branch coverage.
+
#[cfg(not(feature = "hashbrown"))]
mod std_hashtables {
pub(crate) use std::collections::{HashMap, HashSet, hash_map};
pub(crate) use std_hashtables::*;
#[cfg(feature = "hashbrown")]
- mod hashbrown_tables {
- pub(crate) use hashbrown::{HashMap, HashSet, hash_map};
+ pub(crate) use self::hashbrown::hash_map;
+
+ #[cfg(all(feature = "hashbrown", fuzzing))]
+ mod nonrandomized_hashbrown {
+ pub(crate) use hashbrown::{HashMap, HashSet};
pub(crate) type OccupiedHashMapEntry<'a, K, V> =
- hashbrown::hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>;
+ hashbrown::hash_map::OccupiedEntry<'a, K, V, hashbrown::hash_map::DefaultHashBuilder>;
pub(crate) type VacantHashMapEntry<'a, K, V> =
- hashbrown::hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>;
+ hashbrown::hash_map::VacantEntry<'a, K, V, hashbrown::hash_map::DefaultHashBuilder>;
}
- #[cfg(feature = "hashbrown")]
- pub(crate) use hashbrown_tables::*;
+ #[cfg(all(feature = "hashbrown", fuzzing))]
+ pub(crate) use nonrandomized_hashbrown::*;
- pub(crate) fn new_hash_map<K: core::hash::Hash + Eq, V>() -> HashMap<K, V> { HashMap::new() }
- pub(crate) fn hash_map_with_capacity<K: core::hash::Hash + Eq, V>(cap: usize) -> HashMap<K, V> {
- HashMap::with_capacity(cap)
- }
- pub(crate) fn hash_map_from_iter<K: core::hash::Hash + Eq, V, I: IntoIterator<Item = (K, V)>>(iter: I) -> HashMap<K, V> {
- HashMap::from_iter(iter)
- }
- pub(crate) fn new_hash_set<K: core::hash::Hash + Eq>() -> HashSet<K> { HashSet::new() }
- pub(crate) fn hash_set_with_capacity<K: core::hash::Hash + Eq>(cap: usize) -> HashSet<K> {
- HashSet::with_capacity(cap)
+ #[cfg(all(feature = "hashbrown", not(fuzzing)))]
+ mod randomized_hashtables {
+ use super::*;
+ use ahash::RandomState;
+
+ pub(crate) type HashMap<K, V> = hashbrown::HashMap<K, V, RandomState>;
+ pub(crate) type HashSet<K> = hashbrown::HashSet<K, RandomState>;
+
+ pub(crate) type OccupiedHashMapEntry<'a, K, V> =
+ hashbrown::hash_map::OccupiedEntry<'a, K, V, RandomState>;
+ pub(crate) type VacantHashMapEntry<'a, K, V> =
+ hashbrown::hash_map::VacantEntry<'a, K, V, RandomState>;
+
+ pub(crate) fn new_hash_map<K, V>() -> HashMap<K, V> {
+ HashMap::with_hasher(RandomState::new())
+ }
+ pub(crate) fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
+ HashMap::with_capacity_and_hasher(cap, RandomState::new())
+ }
+ pub(crate) fn hash_map_from_iter<K: core::hash::Hash + Eq, V, I: IntoIterator<Item=(K, V)>>(iter: I) -> HashMap<K, V> {
+ let iter = iter.into_iter();
+ let min_size = iter.size_hint().0;
+ let mut res = HashMap::with_capacity_and_hasher(min_size, RandomState::new());
+ res.extend(iter);
+ res
+ }
+
+ pub(crate) fn new_hash_set<K>() -> HashSet<K> {
+ HashSet::with_hasher(RandomState::new())
+ }
+ pub(crate) fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
+ HashSet::with_capacity_and_hasher(cap, RandomState::new())
+ }
+ pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item=K>>(iter: I) -> HashSet<K> {
+ let iter = iter.into_iter();
+ let min_size = iter.size_hint().0;
+ let mut res = HashSet::with_capacity_and_hasher(min_size, RandomState::new());
+ res.extend(iter);
+ res
+ }
}
- pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item = K>>(iter: I) -> HashSet<K> {
- HashSet::from_iter(iter)
+
+ #[cfg(any(not(feature = "hashbrown"), fuzzing))]
+ mod randomized_hashtables {
+ use super::*;
+
+ pub(crate) fn new_hash_map<K, V>() -> HashMap<K, V> { HashMap::new() }
+ pub(crate) fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
+ HashMap::with_capacity(cap)
+ }
+ pub(crate) fn hash_map_from_iter<K: core::hash::Hash + Eq, V, I: IntoIterator<Item=(K, V)>>(iter: I) -> HashMap<K, V> {
+ HashMap::from_iter(iter)
+ }
+
+ pub(crate) fn new_hash_set<K>() -> HashSet<K> { HashSet::new() }
+ pub(crate) fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
+ HashSet::with_capacity(cap)
+ }
+ pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item=K>>(iter: I) -> HashSet<K> {
+ HashSet::from_iter(iter)
+ }
}
- pub use alloc::borrow::ToOwned;
- pub use alloc::string::ToString;
+ pub(crate) use randomized_hashtables::*;
}
#[cfg(all(not(ldk_bench), feature = "backtrace", feature = "std", test))]
// TODO: Once we can rely on the counterparty_node_id from the
// monitor event, this and the outpoint_to_peer map should be removed.
let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- match outpoint_to_peer.get(&funding_txo) {
+ match outpoint_to_peer.get(funding_txo) {
Some(cp_id) => cp_id.clone(),
None => return,
}
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
downstream_counterparty_and_funding_outpoint:
Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
- if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
blocked_channel_id);
let mut aggregate_path_contribution_msat = path_value_msat;
for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
- let target = private_hop_key_cache.get(&prev_hop_id).unwrap();
+ let target = private_hop_key_cache.get(prev_hop_id).unwrap();
- if let Some(first_channels) = first_hop_targets.get(&target) {
+ if let Some(first_channels) = first_hop_targets.get(target) {
if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.",
hop.short_channel_id);
let candidate = network_channels
.get(&hop.short_channel_id)
- .and_then(|channel| channel.as_directed_to(&target))
+ .and_then(|channel| channel.as_directed_to(target))
.map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate {
info,
short_channel_id: hop.short_channel_id,
.saturating_add(1);
// Searching for a direct channel between last checked hop and first_hop_targets
- if let Some(first_channels) = first_hop_targets.get_mut(&target) {
+ if let Some(first_channels) = first_hop_targets.get_mut(target) {
sort_first_hop_channels(first_channels, &used_liquidities,
recommended_value_msat, our_node_pubkey);
for details in first_channels {
_ => return 0,
};
let source = candidate.source();
- if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
+ if let Some(penalty) = score_params.manual_node_penalties.get(target) {
return *penalty;
}
let amount_msat = usage.amount_msat.saturating_add(usage.inflight_htlc_msat);
let capacity_msat = usage.effective_capacity.as_msat();
self.channel_liquidities
- .get(&scid)
+ .get(scid)
.unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
.as_directed(&source, &target, capacity_msat)
.penalty_msat(amount_msat, score_params)