Use `test`/`_test_utils` to enable single-threaded debug assertions
author    Matt Corallo <git@bluematt.me>
          Tue, 17 Jan 2023 23:40:44 +0000 (23:40 +0000)
committer Matt Corallo <git@bluematt.me>
          Tue, 17 Jan 2023 23:47:45 +0000 (23:47 +0000)
We have a number of debug assertions which are expected to never
fire when running in a single thread. This is just fine in tests,
and gives us good coverage of our lockorder requirements, but it
regularly surprises users, who may run with debug assertions
enabled in their own test environments.

Instead, we gate these checks on the `cfg(test)` setting as well as
the internal `_test_utils` feature, ensuring they run in our own
tests but not in downstream tests.
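
To illustrate the pattern (a minimal sketch, assuming a hypothetical
`do_work` function and lock; this is not rust-lightning's actual API):
the check only compiles in our own test builds, via either `cfg(test)`
or the internal `_test_utils` feature, and is downgraded from `assert!`
to `debug_assert!` so it can never abort a build where it does compile.

    use std::sync::Mutex;

    // Hypothetical example, not rust-lightning's API: `do_work` stands in
    // for any function with a single-threaded lockorder invariant.
    fn do_work(state: &Mutex<u64>) {
        // Compiled only for our own tests: directly via `cfg(test)`, or in
        // cross-crate tests via the internal `_test_utils` feature
        // (which must be declared as a feature in Cargo.toml).
        #[cfg(any(test, feature = "_test_utils"))]
        {
            // In a single thread nothing else can hold the lock, so a
            // failed `try_lock` indicates a lockorder bug.
            debug_assert!(state.try_lock().is_ok());
        }
        *state.lock().unwrap() += 1;
    }

Using `debug_assert!` rather than `assert!` is belt-and-braces: even if
a downstream crate somehow built with `_test_utils` enabled, a release
build would still not abort.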

lightning-net-tokio/src/lib.rs
lightning/src/ln/channelmanager.rs

lightning-net-tokio/src/lib.rs
index bd6f13db85cc56e0ae5332ed810e4f3e30feb03c..39452cff034ffc27ba6118814efaf3f480d7c579 100644 (file)
@@ -286,7 +286,7 @@ pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Peer
 {
        let remote_addr = get_addr_from_stream(&stream);
        let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
-       #[cfg(debug_assertions)]
+       #[cfg(test)]
        let last_us = Arc::clone(&us);
 
        let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
@@ -307,8 +307,8 @@ pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Peer
                                // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
                                // keep too many wakers around, this makes sense. The race should be rare (we do
                                // some work after shutdown()) and an error would be a major memory leak.
-                               #[cfg(debug_assertions)]
-                               assert!(Arc::try_unwrap(last_us).is_ok());
+                               #[cfg(test)]
+                               debug_assert!(Arc::try_unwrap(last_us).is_ok());
                        }
                }
        }
@@ -335,7 +335,7 @@ pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Pee
 {
        let remote_addr = get_addr_from_stream(&stream);
        let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
-       #[cfg(debug_assertions)]
+       #[cfg(test)]
        let last_us = Arc::clone(&us);
        let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
                Some(tokio::spawn(async move {
@@ -381,8 +381,8 @@ pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::Pee
                                // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
                                // keep too many wakers around, this makes sense. The race should be rare (we do
                                // some work after shutdown()) and an error would be a major memory leak.
-                               #[cfg(debug_assertions)]
-                               assert!(Arc::try_unwrap(last_us).is_ok());
+                               #[cfg(test)]
+                               debug_assert!(Arc::try_unwrap(last_us).is_ok());
                        }
                }
        }
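
The tokio-side assertions above check that, once a connection is torn
down, the spawned task holds the last clone of the connection `Arc`,
i.e. no waker is leaking it. A minimal sketch of that check, with
`ConnState` and `check_last_reference` as illustrative names (the real
state type is internal to lightning-net-tokio):

    use std::sync::{Arc, Mutex};

    // Illustrative stand-in for the per-connection state the tokio glue
    // tracks; not the crate's real type.
    struct ConnState { _fd: i32 }

    fn check_last_reference(last_us: Arc<Mutex<ConnState>>) {
        // `Arc::try_unwrap` succeeds only for the final clone, so a failure
        // means some waker still holds the connection after shutdown(),
        // which would be a memory leak. Gated so only our tests pay for it.
        #[cfg(test)]
        debug_assert!(Arc::try_unwrap(last_us).is_ok());
        #[cfg(not(test))]
        drop(last_us);
    }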
lightning/src/ln/channelmanager.rs
index 5b312f04e4a13ba987539a8bde2481f42cd20550..c5b9f924d82927d24f69c1bd3b820528d080cb32 100644 (file)
@@ -1153,12 +1153,12 @@ macro_rules! handle_error {
                match $internal {
                        Ok(msg) => Ok(msg),
                        Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-                               #[cfg(debug_assertions)]
+                               #[cfg(any(feature = "_test_utils", test))]
                                {
                                        // In testing, ensure there are no deadlocks where the lock is already held upon
                                        // entering the macro.
-                                       assert!($self.pending_events.try_lock().is_ok());
-                                       assert!($self.per_peer_state.try_write().is_ok());
+                                       debug_assert!($self.pending_events.try_lock().is_ok());
+                                       debug_assert!($self.per_peer_state.try_write().is_ok());
                                }
 
                                let mut msg_events = Vec::with_capacity(2);
@@ -1193,7 +1193,7 @@ macro_rules! handle_error {
                                                let mut peer_state = peer_state_mutex.lock().unwrap();
                                                peer_state.pending_msg_events.append(&mut msg_events);
                                        }
-                                       #[cfg(debug_assertions)]
+                                       #[cfg(any(feature = "_test_utils", test))]
                                        {
                                                if let None = per_peer_state.get(&$counterparty_node_id) {
                                                        // This shouldn't occur in tests unless an unknown counterparty_node_id
@@ -1206,10 +1206,10 @@ macro_rules! handle_error {
                                                                => {
                                                                        assert_eq!(*data, expected_error_str);
                                                                        if let Some((err_channel_id, _user_channel_id)) = chan_id {
-                                                                               assert_eq!(*channel_id, err_channel_id);
+                                                                               debug_assert_eq!(*channel_id, err_channel_id);
                                                                        }
                                                                }
-                                                               _ => panic!("Unexpected event"),
+                                                               _ => debug_assert!(false, "Unexpected event"),
                                                        }
                                                }
                                        }
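
What the `handle_error!` entry checks above amount to, sketched against
an illustrative skeleton (the field names are taken from the diff, but
the `Manager` struct itself is hypothetical): if this thread already
held either lock when entering the macro, the error path would
deadlock, and in a single-threaded test `try_lock`/`try_write` can only
fail for exactly that reason.

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Hypothetical skeleton; the real ChannelManager has far more fields.
    struct Manager {
        pending_events: Mutex<Vec<String>>,
        per_peer_state: RwLock<HashMap<u64, Mutex<()>>>,
    }

    impl Manager {
        fn entry_checks(&self) {
            #[cfg(any(feature = "_test_utils", test))]
            {
                // Both must be free on entry; a failure here means the
                // caller entered the error path while holding a lock.
                debug_assert!(self.pending_events.try_lock().is_ok());
                debug_assert!(self.per_peer_state.try_write().is_ok());
            }
        }
    }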
@@ -3565,7 +3565,7 @@ where
        /// Fails an HTLC backwards to the sender of it to us.
        /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
        fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-               #[cfg(debug_assertions)]
+               #[cfg(any(feature = "_test_utils", test))]
                {
                        // Ensure that no peer state channel storage lock is held when calling this
                        // function.
@@ -3574,7 +3574,7 @@ where
                        // this function with any `per_peer_state` peer lock acquired would.
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_, peer) in per_peer_state.iter() {
-                               assert!(peer.try_lock().is_ok());
+                               debug_assert!(peer.try_lock().is_ok());
                        }
                }
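
The same idea, per peer, for `fail_htlc_backwards_internal`, again as a
hedged sketch over a hypothetical `Manager` (the real function goes on
to take individual peer locks, so any lock already held on entry would
deadlock):

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Hypothetical skeleton mirroring `per_peer_state` from the diff.
    struct Manager { per_peer_state: RwLock<HashMap<u64, Mutex<()>>> }

    impl Manager {
        fn fail_htlc_backwards_internal(&self) {
            #[cfg(any(feature = "_test_utils", test))]
            {
                // Try-lock every peer: in a single-threaded test, a failure
                // proves the caller already holds that peer's lock.
                let per_peer_state = self.per_peer_state.read().unwrap();
                for (_, peer) in per_peer_state.iter() {
                    debug_assert!(peer.try_lock().is_ok());
                }
            }
            // ...the actual HTLC failure handling would follow here...
        }
    }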