Order ChannelManager lock acquisition
author:    Jeffrey Czyz <jkczyz@gmail.com>
           Thu, 4 Mar 2021 03:21:16 +0000 (19:21 -0800)
committer: Jeffrey Czyz <jkczyz@gmail.com>
           Fri, 5 Mar 2021 03:08:42 +0000 (19:08 -0800)
Since last_block_hash is read while updating channels and written when
blocks are connected and disconnected, its lock should be held for the
duration of these operations. Further, since the channel_state lock is
already held during these operations, an ordering of lock acquisition
should be maintained to enforce consistency.

Ordering lock acquisition such that the channel_state lock is acquired
first and held while the last_block_hash lock is taken (and released)
gives a consistent lock ordering.

lightning/src/ln/channelmanager.rs

index c1c5ae1b8ead79e49e15e31066263ab5d65763e6..30f745c3f14b2d6922576726732c17f82653ea8e 100644 (file)
@@ -3186,6 +3186,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
                let block_hash = header.block_hash();
                log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
+
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
                let mut failed_channels = Vec::new();
                let mut timed_out_htlcs = Vec::new();
@@ -3270,7 +3271,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                });
                                !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
                        });
+
+                       *self.last_block_hash.lock().unwrap() = block_hash;
+                       self.latest_block_height.store(height as usize, Ordering::Release);
                }
+
                for failure in failed_channels.drain(..) {
                        self.finish_force_close_channel(failure);
                }
@@ -3278,8 +3283,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
                }
-               self.latest_block_height.store(height as usize, Ordering::Release);
-               *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = block_hash;
+
                loop {
                        // Update last_node_announcement_serial to be the max of its current value and the
                        // block timestamp. This should keep us close to the current time without relying on
@@ -3322,12 +3326,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        true
                                }
                        });
+
+                       *self.last_block_hash.lock().unwrap() = header.block_hash();
+                       self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
                }
+
                for failure in failed_channels.drain(..) {
                        self.finish_force_close_channel(failure);
                }
-               self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
-               *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
        }
 
        /// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
@@ -3881,11 +3887,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                writer.write_all(&[SERIALIZATION_VERSION; 1])?;
                writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
 
+               let channel_state = self.channel_state.lock().unwrap();
+
                self.genesis_hash.write(writer)?;
                (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
                self.last_block_hash.lock().unwrap().write(writer)?;
 
-               let channel_state = self.channel_state.lock().unwrap();
                let mut unfunded_channels = 0;
                for (_, channel) in channel_state.by_id.iter() {
                        if !channel.is_funding_initiated() {