{
let remote_addr = get_addr_from_stream(&stream);
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
- #[cfg(debug_assertions)]
+ #[cfg(test)]
let last_us = Arc::clone(&us);
let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
// keep too many wakers around, this makes sense. The race should be rare (we do
// some work after shutdown()) and an error would be a major memory leak.
- #[cfg(debug_assertions)]
- assert!(Arc::try_unwrap(last_us).is_ok());
+ #[cfg(test)]
+ debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
}
}
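// For context, a minimal sketch (not part of the diff above) of why `Arc::try_unwrap`
// works as the "no lingering wakers" check: it only succeeds when the caller holds the
// last strong reference, so any clone still held by a waker or descriptor makes it
// return `Err`.
use std::sync::Arc;

fn main() {
    let us = Arc::new(42u8);
    let descriptor_clone = Arc::clone(&us); // e.g. a descriptor/waker still alive
    assert!(Arc::try_unwrap(us).is_err());  // another strong reference remains

    let us = descriptor_clone;              // the other clone has since been dropped...
    assert!(Arc::try_unwrap(us).is_ok());   // ...so unwrapping the last one succeeds
}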
{
let remote_addr = get_addr_from_stream(&stream);
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
- #[cfg(debug_assertions)]
+ #[cfg(test)]
let last_us = Arc::clone(&us);
let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
Some(tokio::spawn(async move {
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
// keep too many wakers around, this makes sense. The race should be rare (we do
// some work after shutdown()) and an error would be a major memory leak.
- #[cfg(debug_assertions)]
- assert!(Arc::try_unwrap(last_us).is_ok());
+ #[cfg(test)]
+ debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
}
}
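// A brief aside on the gating switched to above (standard Rust semantics, not specific
// to this codebase): `cfg(test)` limits code to the crate's own test builds, while
// `cfg(debug_assertions)` also pulls it into any downstream debug build; `debug_assert!`
// is compiled out entirely when debug assertions are disabled, whereas `assert!` always
// runs. Illustrative sketch only:
fn check(value: u32) {
    #[cfg(test)]
    debug_assert!(value < 100); // only present when compiling this crate's tests

    #[cfg(debug_assertions)]
    assert!(value < 100); // also present in downstream users' debug builds
    let _ = value; // silence the unused warning when both cfgs are off
}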
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
- #[cfg(debug_assertions)]
+ #[cfg(any(feature = "_test_utils", test))]
{
// In testing, ensure there are no deadlocks where the lock is already held upon
// entering the macro.
- assert!($self.pending_events.try_lock().is_ok());
- assert!($self.per_peer_state.try_write().is_ok());
+ debug_assert!($self.pending_events.try_lock().is_ok());
+ debug_assert!($self.per_peer_state.try_write().is_ok());
}
let mut msg_events = Vec::with_capacity(2);
let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.append(&mut msg_events);
}
- #[cfg(debug_assertions)]
+ #[cfg(any(feature = "_test_utils", test))]
{
if let None = per_peer_state.get(&$counterparty_node_id) {
// This shouldn't occur in tests unless an unknown counterparty_node_id
=> {
assert_eq!(*data, expected_error_str);
if let Some((err_channel_id, _user_channel_id)) = chan_id {
- assert_eq!(*channel_id, err_channel_id);
+ debug_assert_eq!(*channel_id, err_channel_id);
}
}
- _ => panic!("Unexpected event"),
+ _ => debug_assert!(false, "Unexpected event"),
}
}
}
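// Sketch (illustrative, not taken from the diff) of the `try_lock` pattern asserted
// above: if the current call path already holds the lock, `try_lock` fails immediately
// instead of deadlocking, so the debug assertion surfaces the re-entrancy bug in test
// builds rather than hanging.
use std::sync::Mutex;

fn assert_not_held(pending_events: &Mutex<Vec<u32>>) {
    debug_assert!(pending_events.try_lock().is_ok());
}

fn main() {
    let pending_events = Mutex::new(Vec::new());
    assert_not_held(&pending_events); // fine: nothing holds the lock yet

    let _guard = pending_events.lock().unwrap();
    // Calling `assert_not_held(&pending_events)` at this point would trip the
    // assertion, because `_guard` still holds the lock.
}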
/// Fails an HTLC backwards to the sender of it to us.
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
- #[cfg(debug_assertions)]
+ #[cfg(any(feature = "_test_utils", test))]
{
// Ensure that no peer state channel storage lock is held when calling this
// function; calling this function with any `per_peer_state` peer lock
// acquired would violate that requirement.
let per_peer_state = self.per_peer_state.read().unwrap();
for (_, peer) in per_peer_state.iter() {
- assert!(peer.try_lock().is_ok());
+ debug_assert!(peer.try_lock().is_ok());
}
}
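// Sketch (illustrative only) of the per-peer variant of the same check: walk every
// peer's mutex and require that none of them is currently locked, so the function can
// never be entered with a `per_peer_state` peer lock held.
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

fn assert_no_peer_lock_held(per_peer_state: &RwLock<HashMap<u64, Mutex<u32>>>) {
    let per_peer_state = per_peer_state.read().unwrap();
    for (_, peer) in per_peer_state.iter() {
        debug_assert!(peer.try_lock().is_ok());
    }
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(1u64, Mutex::new(0u32));
    peers.insert(2u64, Mutex::new(0u32));
    assert_no_peer_lock_held(&RwLock::new(peers)); // passes: no peer lock is held
}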