git.bitcoin.ninja Git - rust-lightning/commitdiff
Build `per_peer_state` immediately in `ChannelManager` deser
author Matt Corallo <git@bluematt.me>
Fri, 4 Oct 2024 17:54:00 +0000 (17:54 +0000)
committer Matt Corallo <git@bluematt.me>
Thu, 10 Oct 2024 14:30:12 +0000 (14:30 +0000)
Instead of first building a map from peers to their channels and
then draining it to build the `per_peer_state`, we build
`per_peer_state` up front and store channels in it directly.

This not only avoids an unnecessary map indirection, but also gives
us access to the new fields in `per_peer_state` when reading
`Channel`s, which we'll need in a coming commit.
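
In outline, the new read path folds channel insertion and peer-state
creation into one step. A minimal sketch of the pattern, using
simplified stand-in types (the stand-in `PeerState`, the `u64` keys,
and the `String` channels are placeholders, not the real
rust-lightning types):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Simplified stand-in for the real PeerState; only the field used here.
struct PeerState {
    channel_by_id: HashMap<u64, String>,
}

fn empty_peer_state() -> PeerState {
    PeerState { channel_by_id: HashMap::new() }
}

fn main() {
    // (peer, channel id, channel) triples standing in for deserialized channels.
    let chans = vec![(1u64, 10u64, "chan_a".to_owned()), (1, 11, "chan_b".to_owned())];

    let mut per_peer_state: HashMap<u64, Mutex<PeerState>> = HashMap::new();
    for (peer, chan_id, chan) in chans {
        // Store each channel directly in its peer's entry, creating the
        // entry on first use -- no intermediate peer-to-channels map.
        per_peer_state.entry(peer)
            .or_insert_with(|| Mutex::new(empty_peer_state()))
            .get_mut().unwrap() // `&mut` access: no locking, fails only on poison
            .channel_by_id.insert(chan_id, chan);
    }
    assert_eq!(per_peer_state[&1].lock().unwrap().channel_by_id.len(), 2);
}
```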

lightning/src/ln/channelmanager.rs
lightning/src/sync/debug_sync.rs
lightning/src/sync/nostd_sync.rs

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 19c512c2a48b9ad031e91dfede0047cd661d0a10..2cc12f5792b206c1c49379ab254f805ed336a546 100644
@@ -12248,11 +12248,23 @@ where
                let best_block_height: u32 = Readable::read(reader)?;
                let best_block_hash: BlockHash = Readable::read(reader)?;
 
-               let mut failed_htlcs = Vec::new();
+               let empty_peer_state = || {
+                       PeerState {
+                               channel_by_id: new_hash_map(),
+                               inbound_channel_request_by_id: new_hash_map(),
+                               latest_features: InitFeatures::empty(),
+                               pending_msg_events: Vec::new(),
+                               in_flight_monitor_updates: BTreeMap::new(),
+                               monitor_update_blocked_actions: BTreeMap::new(),
+                               actions_blocking_raa_monitor_updates: BTreeMap::new(),
+                               is_connected: false,
+                       }
+               };
 
+               let mut failed_htlcs = Vec::new();
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
-               let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+               let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
@@ -12340,17 +12352,10 @@ where
                                        if let Some(funding_txo) = channel.context.get_funding_txo() {
                                                outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
                                        }
-                                       match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
-                                               hash_map::Entry::Occupied(mut entry) => {
-                                                       let by_id_map = entry.get_mut();
-                                                       by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
-                                               },
-                                               hash_map::Entry::Vacant(entry) => {
-                                                       let mut by_id_map = new_hash_map();
-                                                       by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
-                                                       entry.insert(by_id_map);
-                                               }
-                                       }
+                                       per_peer_state.entry(channel.context.get_counterparty_node_id())
+                                               .or_insert_with(|| Mutex::new(empty_peer_state()))
+                                               .get_mut().unwrap()
+                                               .channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                }
                        } else if channel.is_awaiting_initial_mon_persist() {
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
@@ -12417,27 +12422,13 @@ where
                        claimable_htlcs_list.push((payment_hash, previous_hops));
                }
 
-               let peer_state_from_chans = |channel_by_id| {
-                       PeerState {
-                               channel_by_id,
-                               inbound_channel_request_by_id: new_hash_map(),
-                               latest_features: InitFeatures::empty(),
-                               pending_msg_events: Vec::new(),
-                               in_flight_monitor_updates: BTreeMap::new(),
-                               monitor_update_blocked_actions: BTreeMap::new(),
-                               actions_blocking_raa_monitor_updates: BTreeMap::new(),
-                               is_connected: false,
-                       }
-               };
-
                let peer_count: u64 = Readable::read(reader)?;
-               let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                for _ in 0..peer_count {
-                       let peer_pubkey = Readable::read(reader)?;
-                       let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
-                       let mut peer_state = peer_state_from_chans(peer_chans);
-                       peer_state.latest_features = Readable::read(reader)?;
-                       per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+                       let peer_pubkey: PublicKey = Readable::read(reader)?;
+                       let latest_features = Readable::read(reader)?;
+                       if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+                               peer_state.get_mut().unwrap().latest_features = latest_features;
+                       }
                }
 
                let event_count: u64 = Readable::read(reader)?;
@@ -12649,7 +12640,7 @@ where
                                        // still open, we need to replay any monitor updates that are for closed channels,
                                        // creating the necessary peer_state entries as we go.
                                        let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
-                                               Mutex::new(peer_state_from_chans(new_hash_map()))
+                                               Mutex::new(empty_peer_state())
                                        });
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
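
The `get_mut().unwrap()` chains above lean on a property of
`std::sync::Mutex` (mirrored by the wrapper methods added below): given
exclusive access to the surrounding map, the mutex contents can be
borrowed without ever taking the lock. A minimal sketch with plain std
types, not the rust-lightning wrappers:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

fn main() {
    let mut map: HashMap<u8, Mutex<Vec<u32>>> = HashMap::new();
    map.entry(1).or_insert_with(|| Mutex::new(Vec::new()));

    // Holding `&mut` on the map proves no other thread can reach any Mutex
    // inside it, so `Mutex::get_mut` returns `&mut Vec<u32>` without locking;
    // it can only fail if the mutex was poisoned.
    if let Some(m) = map.get_mut(&1) {
        m.get_mut().unwrap().push(42);
    }
    assert_eq!(*map[&1].lock().unwrap(), vec![42]);
}
```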
diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index 776e35e8ce014d307dc169364abd381008a804fb..183c074701e7c89bc5dfb36dbd44bc9148041073 100644
@@ -311,6 +311,10 @@ impl<T> Mutex<T> {
                }
                res
        }
+
+       pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+               self.inner.get_mut().map_err(|_| ())
+       }
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
diff --git a/lightning/src/sync/nostd_sync.rs b/lightning/src/sync/nostd_sync.rs
index 03fa65b69c674206341c70d1108a9aab4efac907..b3963da762e6ee53aaa49a5240b655c4d4662871 100644
@@ -40,6 +40,10 @@ impl<T> Mutex<T> {
        pub fn into_inner(self) -> LockResult<T> {
                Ok(self.inner.into_inner())
        }
+
+       pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+               Ok(self.inner.get_mut())
+       }
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
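
Both sync backends now expose the same shape as
`std::sync::Mutex::get_mut`: an exclusive borrow of the mutex yields an
exclusive borrow of its contents, wrapped in a `LockResult` (which the
`nostd_sync` variant can never fail to produce, hence the unconditional
`Ok`). A small usage sketch against std's own `Mutex`, which
`debug_sync` delegates to:

```rust
use std::sync::Mutex;

fn bump(counter: &mut Mutex<u64>) {
    // No lock is acquired: `&mut Mutex<T>` already guarantees exclusive
    // access, so the only possible error is a poisoned mutex.
    *counter.get_mut().unwrap() += 1;
}

fn main() {
    let mut c = Mutex::new(0u64);
    bump(&mut c);
    assert_eq!(c.into_inner().unwrap(), 1);
}
```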