git.bitcoin.ninja Git - rust-lightning/commitdiff
Bump `hashbrown` dependency to 0.13
author: Matt Corallo <git@bluematt.me>
Thu, 18 Jan 2024 22:39:26 +0000 (22:39 +0000)
committer: Matt Corallo <git@bluematt.me>
Fri, 2 Feb 2024 18:05:08 +0000 (18:05 +0000)
While this isn't expected to materially improve performance, it
does get us ahash 0.8, which allows us to reduce fuzzing
randomness, making our fuzzers much happier.

Sadly, by default `ahash` no longer tries to autodetect a
randomness source, so we cannot simply rely on `hashbrown` to do
randomization for us, but rather have to also explicitly depend on
`ahash`.

.github/workflows/build.yml
bench/Cargo.toml
ci/check-cfg-flags.py
fuzz/Cargo.toml
lightning-invoice/Cargo.toml
lightning/Cargo.toml
lightning/src/chain/onchaintx.rs
lightning/src/lib.rs
lightning/src/ln/channelmanager.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs

index cff105470d23d1455ca4479a23b63b365c29b4a0..ab10b36d25ae09a07d5f3f251b6991ff33f77ed0 100644 (file)
@@ -110,7 +110,7 @@ jobs:
         run: |
           cd lightning
           RUSTFLAGS="--cfg=require_route_graph_test" cargo test
-          RUSTFLAGS="--cfg=require_route_graph_test" cargo test --features hashbrown
+          RUSTFLAGS="--cfg=require_route_graph_test" cargo test --features hashbrown,ahash
           cd ..
       - name: Run benchmarks on Rust ${{ matrix.toolchain }}
         run: |
index 05354890c2a8fb4f5f0e9233fa273d444aac7cd9..ff254e1d9ccc97f671aa44374987e818162afca8 100644 (file)
@@ -9,7 +9,7 @@ name = "bench"
 harness = false
 
 [features]
-hashbrown = ["lightning/hashbrown"]
+hashbrown = ["lightning/hashbrown", "lightning/ahash"]
 
 [dependencies]
 lightning = { path = "../lightning", features = ["_test_utils", "criterion"] }
index 277ae1077baad7052fe379f1f8cb5cb1e1ff6f41..d6e8e0a90feff9862aaaccbe56271673d25b834d 100755 (executable)
@@ -13,6 +13,8 @@ def check_feature(feature):
         pass
     elif feature == "no-std":
         pass
+    elif feature == "ahash":
+        pass
     elif feature == "hashbrown":
         pass
     elif feature == "backtrace":
index 0c279a015c7398fe2ba0d8ba5e5b7ec49f2a154a..34c8704a7edcec08cb429b9341e00412d8f29520 100644 (file)
@@ -22,7 +22,7 @@ lightning = { path = "../lightning", features = ["regex", "hashbrown", "_test_ut
 lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
 bitcoin = { version = "0.30.2", features = ["secp-lowmemory"] }
 hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
-hashbrown = "0.8"
+hashbrown = "0.13"
 
 afl = { version = "0.12", optional = true }
 honggfuzz = { version = "0.5", optional = true, default-features = false }
index e0d3560c10b0b10d91b11415726726c867b2d76d..4376ac0dc70c943550600d8e748d5eb341473db8 100644 (file)
@@ -24,7 +24,7 @@ bech32 = { version = "0.9.0", default-features = false }
 lightning = { version = "0.0.121", path = "../lightning", default-features = false }
 secp256k1 = { version = "0.27.0", default-features = false, features = ["recovery", "alloc"] }
 num-traits = { version = "0.2.8", default-features = false }
-hashbrown = { version = "0.8", optional = true }
+hashbrown = { version = "0.13", optional = true }
 serde = { version = "1.0.118", optional = true }
 bitcoin = { version = "0.30.2", default-features = false }
 
index 1fe0f0dd11955a59beabdccdebe482e728d85949..8214193cc420cdc3f4de08ccf98094557d2c54cf 100644 (file)
@@ -31,7 +31,7 @@ unsafe_revoked_tx_signing = []
 # Override signing to not include randomness when generating signatures for test vectors.
 _test_vectors = []
 
-no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc", "libm"]
+no-std = ["hashbrown", "ahash", "bitcoin/no-std", "core2/alloc", "libm"]
 std = ["bitcoin/std"]
 
 # Generates low-r bitcoin signatures, which saves 1 byte in 50% of the cases
@@ -42,7 +42,8 @@ default = ["std", "grind_signatures"]
 [dependencies]
 bitcoin = { version = "0.30.2", default-features = false, features = ["secp-recovery"] }
 
-hashbrown = { version = "0.8", optional = true }
+hashbrown = { version = "0.13", optional = true }
+ahash = { version = "0.8", optional = true, default-features = false }
 hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
 regex = { version = "1.5.6", optional = true }
 backtrace = { version = "0.3", optional = true }
@@ -50,6 +51,16 @@ backtrace = { version = "0.3", optional = true }
 core2 = { version = "0.3.0", optional = true, default-features = false }
 libm = { version = "0.2", optional = true, default-features = false }
 
+# Because ahash no longer (kinda poorly) does it for us, (roughly) list out the targets that
+# getrandom supports and turn on ahash's `runtime-rng` feature for them.
+[target.'cfg(not(any(target_os = "unknown", target_os = "none")))'.dependencies]
+ahash = { version = "0.8", optional = true, default-features = false, features = ["runtime-rng"] }
+
+# Not sure what target_os gets set to for sgx, so to be safe always enable runtime-rng for x86_64
+# platforms (assuming LDK isn't being used on embedded x86-64 running directly on metal).
+[target.'cfg(target_arch = "x86_64")'.dependencies]
+ahash = { version = "0.8", optional = true, default-features = false, features = ["runtime-rng"] }
+
 [dev-dependencies]
 regex = "1.5.6"
 
index 5fe4bd2c1d2eb7f7aa2d774a8dec2e2ac8754828..05d59431fc3a815585554eae7ffd5323d72b4112 100644 (file)
@@ -686,7 +686,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                if let Some(claim_id) = claim_id {
                        if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
                                for outpoint in claim.outpoints() {
-                                       self.claimable_outpoints.remove(&outpoint);
+                                       self.claimable_outpoints.remove(outpoint);
                                }
                        }
                } else {
index 77b1f91cbc0a55acc2950da3419f6190485fec23..9bc158328701562954555286e18b0093ea14084b 100644 (file)
@@ -167,9 +167,19 @@ mod io_extras {
 mod prelude {
        #[cfg(feature = "hashbrown")]
        extern crate hashbrown;
+       #[cfg(feature = "ahash")]
+       extern crate ahash;
 
        pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box};
 
+       pub use alloc::borrow::ToOwned;
+       pub use alloc::string::ToString;
+
+       // For no-std builds, we need to use hashbrown, however, by default, it doesn't randomize the
+       // hashing and is vulnerable to HashDoS attacks. Thus, when not fuzzing, we use its default
+       // ahash hashing algorithm but randomize, opting to not randomize when fuzzing to avoid false
+       // positive branch coverage.
+
        #[cfg(not(feature = "hashbrown"))]
        mod std_hashtables {
                pub(crate) use std::collections::{HashMap, HashSet, hash_map};
@@ -183,35 +193,85 @@ mod prelude {
        pub(crate) use std_hashtables::*;
 
        #[cfg(feature = "hashbrown")]
-       mod hashbrown_tables {
-               pub(crate) use hashbrown::{HashMap, HashSet, hash_map};
+       pub(crate) use self::hashbrown::hash_map;
+
+       #[cfg(all(feature = "hashbrown", fuzzing))]
+       mod nonrandomized_hashbrown {
+               pub(crate) use hashbrown::{HashMap, HashSet};
 
                pub(crate) type OccupiedHashMapEntry<'a, K, V> =
-                       hashbrown::hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>;
+                       hashbrown::hash_map::OccupiedEntry<'a, K, V, hashbrown::hash_map::DefaultHashBuilder>;
                pub(crate) type VacantHashMapEntry<'a, K, V> =
-                       hashbrown::hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>;
+                       hashbrown::hash_map::VacantEntry<'a, K, V, hashbrown::hash_map::DefaultHashBuilder>;
        }
-       #[cfg(feature = "hashbrown")]
-       pub(crate) use hashbrown_tables::*;
+       #[cfg(all(feature = "hashbrown", fuzzing))]
+       pub(crate) use nonrandomized_hashbrown::*;
 
-       pub(crate) fn new_hash_map<K: core::hash::Hash + Eq, V>() -> HashMap<K, V> { HashMap::new() }
-       pub(crate) fn hash_map_with_capacity<K: core::hash::Hash + Eq, V>(cap: usize) -> HashMap<K, V> {
-               HashMap::with_capacity(cap)
-       }
-       pub(crate) fn hash_map_from_iter<K: core::hash::Hash + Eq, V, I: IntoIterator<Item = (K, V)>>(iter: I) -> HashMap<K, V> {
-               HashMap::from_iter(iter)
-       }
 
-       pub(crate) fn new_hash_set<K: core::hash::Hash + Eq>() -> HashSet<K> { HashSet::new() }
-       pub(crate) fn hash_set_with_capacity<K: core::hash::Hash + Eq>(cap: usize) -> HashSet<K> {
-               HashSet::with_capacity(cap)
+       #[cfg(all(feature = "hashbrown", not(fuzzing)))]
+       mod randomized_hashtables {
+               use super::*;
+               use ahash::RandomState;
+
+               pub(crate) type HashMap<K, V> = hashbrown::HashMap<K, V, RandomState>;
+               pub(crate) type HashSet<K> = hashbrown::HashSet<K, RandomState>;
+
+               pub(crate) type OccupiedHashMapEntry<'a, K, V> =
+                       hashbrown::hash_map::OccupiedEntry<'a, K, V, RandomState>;
+               pub(crate) type VacantHashMapEntry<'a, K, V> =
+                       hashbrown::hash_map::VacantEntry<'a, K, V, RandomState>;
+
+               pub(crate) fn new_hash_map<K, V>() -> HashMap<K, V> {
+                       HashMap::with_hasher(RandomState::new())
+               }
+               pub(crate) fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
+                       HashMap::with_capacity_and_hasher(cap, RandomState::new())
+               }
+               pub(crate) fn hash_map_from_iter<K: core::hash::Hash + Eq, V, I: IntoIterator<Item=(K, V)>>(iter: I) -> HashMap<K, V> {
+                       let iter = iter.into_iter();
+                       let min_size = iter.size_hint().0;
+                       let mut res = HashMap::with_capacity_and_hasher(min_size, RandomState::new());
+                       res.extend(iter);
+                       res
+               }
+
+               pub(crate) fn new_hash_set<K>() -> HashSet<K> {
+                       HashSet::with_hasher(RandomState::new())
+               }
+               pub(crate) fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
+                       HashSet::with_capacity_and_hasher(cap, RandomState::new())
+               }
+               pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item=K>>(iter: I) -> HashSet<K> {
+                       let iter = iter.into_iter();
+                       let min_size = iter.size_hint().0;
+                       let mut res = HashSet::with_capacity_and_hasher(min_size, RandomState::new());
+                       res.extend(iter);
+                       res
+               }
        }
-       pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item = K>>(iter: I) -> HashSet<K> {
-               HashSet::from_iter(iter)
+
+       #[cfg(any(not(feature = "hashbrown"), fuzzing))]
+       mod randomized_hashtables {
+               use super::*;
+
+               pub(crate) fn new_hash_map<K, V>() -> HashMap<K, V> { HashMap::new() }
+               pub(crate) fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
+                       HashMap::with_capacity(cap)
+               }
+               pub(crate) fn hash_map_from_iter<K: core::hash::Hash + Eq, V, I: IntoIterator<Item=(K, V)>>(iter: I) -> HashMap<K, V> {
+                       HashMap::from_iter(iter)
+               }
+
+               pub(crate) fn new_hash_set<K>() -> HashSet<K> { HashSet::new() }
+               pub(crate) fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
+                       HashSet::with_capacity(cap)
+               }
+               pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item=K>>(iter: I) -> HashSet<K> {
+                       HashSet::from_iter(iter)
+               }
        }
 
-       pub use alloc::borrow::ToOwned;
-       pub use alloc::string::ToString;
+       pub(crate) use randomized_hashtables::*;
 }
 
 #[cfg(all(not(ldk_bench), feature = "backtrace", feature = "std", test))]
index 4d7be7d92b273e295e0a21d43197e5737d7e1da7..1a4f6035aaa51e6e7432454e3eeb37c13fa91c3b 100644 (file)
@@ -5951,7 +5951,7 @@ where
                                // TODO: Once we can rely on the counterparty_node_id from the
                                // monitor event, this and the outpoint_to_peer map should be removed.
                                let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-                               match outpoint_to_peer.get(&funding_txo) {
+                               match outpoint_to_peer.get(funding_txo) {
                                        Some(cp_id) => cp_id.clone(),
                                        None => return,
                                }
@@ -5964,7 +5964,7 @@ where
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
-                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
@@ -11085,7 +11085,7 @@ where
                                                        downstream_counterparty_and_funding_outpoint:
                                                                Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
                                                } = action {
-                                                       if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+                                                       if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
                                                                log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                        blocked_channel_id);
index 4214a1821cbbe7d110cf74ce44e77f0a8161e7fe..a429bf1eba903913f5b796fec595a1dd25f78232 100644 (file)
@@ -2566,9 +2566,9 @@ where L::Target: Logger {
                                let mut aggregate_path_contribution_msat = path_value_msat;
 
                                for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
-                                       let target = private_hop_key_cache.get(&prev_hop_id).unwrap();
+                                       let target = private_hop_key_cache.get(prev_hop_id).unwrap();
 
-                                       if let Some(first_channels) = first_hop_targets.get(&target) {
+                                       if let Some(first_channels) = first_hop_targets.get(target) {
                                                if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
                                                        log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.",
                                                                hop.short_channel_id);
@@ -2578,7 +2578,7 @@ where L::Target: Logger {
 
                                        let candidate = network_channels
                                                .get(&hop.short_channel_id)
-                                               .and_then(|channel| channel.as_directed_to(&target))
+                                               .and_then(|channel| channel.as_directed_to(target))
                                                .map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate {
                                                        info,
                                                        short_channel_id: hop.short_channel_id,
@@ -2619,7 +2619,7 @@ where L::Target: Logger {
                                                .saturating_add(1);
 
                                        // Searching for a direct channel between last checked hop and first_hop_targets
-                                       if let Some(first_channels) = first_hop_targets.get_mut(&target) {
+                                       if let Some(first_channels) = first_hop_targets.get_mut(target) {
                                                sort_first_hop_channels(first_channels, &used_liquidities,
                                                        recommended_value_msat, our_node_pubkey);
                                                for details in first_channels {
index ec4921812e9521e182736621e10600924681cb64..bb043164a0e8895f8bbe95c8d0f50b7025d8365b 100644 (file)
@@ -1330,7 +1330,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreLookUp for Probabilistic
                        _ => return 0,
                };
                let source = candidate.source();
-               if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
+               if let Some(penalty) = score_params.manual_node_penalties.get(target) {
                        return *penalty;
                }
 
@@ -1360,7 +1360,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreLookUp for Probabilistic
                let amount_msat = usage.amount_msat.saturating_add(usage.inflight_htlc_msat);
                let capacity_msat = usage.effective_capacity.as_msat();
                self.channel_liquidities
-                       .get(&scid)
+                       .get(scid)
                        .unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
                        .as_directed(&source, &target, capacity_msat)
                        .penalty_msat(amount_msat, score_params)