Do not rely on auto-deref'ing when accessing a `Hash{Map,Set}`
author Matt Corallo <git@bluematt.me>
Fri, 6 Jan 2023 20:05:07 +0000 (20:05 +0000)
committer Matt Corallo <git@bluematt.me>
Sun, 15 Jan 2023 23:32:08 +0000 (23:32 +0000)
In newer versions of `hashbrown` this code would be broken. While
we aren't updating `hashbrown` any time soon (as it requires an
MSRV bump), it is useful to swap in a newer `hashbrown` when
fuzzing, which this change makes easier.
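
To illustrate the pattern being removed, here is a minimal sketch (not from
the patch itself; it uses a toy `HashMap<u32, _>` in place of the real
outpoint-keyed maps). Passing a double reference such as `&&u32` to
`HashMap::get` happens to compile against std's `Borrow`-based API via deref
coercion, whereas a newer `hashbrown` would reject it (the commit's
motivation); dereferencing explicitly so the map sees a single `&u32`, as the
hunks below do, works either way:

use std::collections::HashMap;

fn main() {
    let mut claimable: HashMap<u32, &'static str> = HashMap::new();
    claimable.insert(7, "claim");

    // Stand-in for `request_outpoints`: a Vec of references to keys.
    let request_keys: Vec<&u32> = claimable.keys().collect();

    for key in request_keys.iter() {
        // `key` is `&&u32` here. `claimable.get(key)` compiles against
        // std's `HashMap` via deref coercion, but per the commit message
        // a newer `hashbrown` would reject it, so pass exactly one level
        // of reference instead:
        assert_eq!(claimable.get(*key), Some(&"claim"));
    }
}

The other hunks apply the same rule: drop the extra `&` (or the `ref` binding
in the `iter_mut()` loop) so `get`, `remove`, and `contains` receive a single
reference rather than a `&&_`.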

lightning/src/chain/onchaintx.rs
lightning/src/ln/channelmanager.rs

index d6884428ed92bfcc75f278a4bd29b39f73cf31f3..f526cc8aaa4aa85b2b720dd5f8de3b74c00ae06e 100644 (file)
@@ -476,8 +476,8 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                // remove it once it reaches the confirmation threshold, or to generate a new claim if the
                // transaction is reorged out.
                let mut all_inputs_have_confirmed_spend = true;
-               for outpoint in &request_outpoints {
-                       if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
+               for outpoint in request_outpoints.iter() {
+                       if let Some(first_claim_txid_height) = self.claimable_outpoints.get(*outpoint) {
                                // We check for outpoint spends within claims individually rather than as a set
                                // since requests can have outpoints split off.
                                if !self.onchain_events_awaiting_threshold_conf.iter()
@@ -811,7 +811,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                                                        for outpoint in request.outpoints() {
                                                                log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.",
                                                                        outpoint, log_bytes!(package_id));
-                                                               self.claimable_outpoints.remove(&outpoint);
+                                                               self.claimable_outpoints.remove(outpoint);
                                                                #[cfg(anchors)]
                                                                self.pending_claim_events.remove(&package_id);
                                                        }
@@ -820,7 +820,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                                        OnchainEvent::ContentiousOutpoint { package } => {
                                                log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
                                                log_debug!(logger, " {:?}", package.outpoints());
-                                               self.claimable_outpoints.remove(&package.outpoints()[0]);
+                                               self.claimable_outpoints.remove(package.outpoints()[0]);
                                        }
                                }
                        } else {
@@ -898,7 +898,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                                //- resurect outpoint back in its claimable set and regenerate tx
                                match entry.event {
                                        OnchainEvent::ContentiousOutpoint { package } => {
-                                               if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
+                                               if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(package.outpoints()[0]) {
                                                        if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
                                                                request.merge_package(package);
                                                                // Using a HashMap guarantee us than if we have multiple outpoints getting
index 1364e96a9fdf57a533a1bf31b7146f1849e43179..1ab2157356b18840951e743254453f0edfe5bbc5 100644 (file)
@@ -2102,7 +2102,7 @@ where
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
                                if let Some((err, mut code, chan_update)) = loop {
-                                       let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
+                                       let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
                                        let forwarding_chan_info_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
@@ -7116,7 +7116,7 @@ where
                        }
                }
 
-               for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
+               for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
                        if !funding_txo_set.contains(funding_txo) {
                                log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
                                monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);