From 5c3fa553a10be74a123b9530da4aec2342164d97 Mon Sep 17 00:00:00 2001
From: Matt Corallo <git@bluematt.me>
Date: Mon, 28 Aug 2023 01:35:16 +0000
Subject: [PATCH] Remove largely useless checks in chanmon_consistency fuzzer

When reloading nodes A or C, the chanmon_consistency fuzzer currently
calls `get_and_clear_pending_msg_events` on the node, potentially
causing additional `ChannelMonitor` or `ChannelManager` updates, just
to check that no unexpected messages are generated.

There's not much reason to do so; the fuzzer could always swap in a
different command that calls the same method, and the additional
checking requires some awkward monitor-persistence introspection.

Here we simplify the fuzzer by removing this logic entirely.
---
 fuzz/src/chanmon_consistency.rs | 23 ++++++-----------------
 1 file changed, 6 insertions(+), 17 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 4c79f0be..05df0911 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -125,7 +125,6 @@ struct TestChainMonitor {
 	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
 	// fully-serialized monitor state here, as well as the corresponding update_id.
 	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
-	pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChainMonitor {
 	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,7 +134,6 @@ impl TestChainMonitor {
 			keys,
 			persister,
 			latest_monitors: Mutex::new(HashMap::new()),
-			should_update_manager: atomic::AtomicBool::new(false),
 		}
 	}
 }
@@ -146,7 +144,6 @@ impl chain::Watch for TestChainMonitor {
 		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
 			panic!("Already had monitor pre-watch_channel");
 		}
-		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 		self.chain_monitor.watch_channel(funding_txo, monitor)
 	}
 
@@ -162,7 +159,6 @@
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
-		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 		self.chain_monitor.update_channel(funding_txo, update)
 	}
 
@@ -1101,11 +1097,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 			if !chan_a_disconnected {
 				nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 				chan_a_disconnected = true;
-				drain_msg_events_on_disconnect!(0);
-			}
-			if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
-				node_a_ser.0.clear();
-				nodes[0].write(&mut node_a_ser).unwrap();
+				push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+				ab_events.clear();
+				ba_events.clear();
 			}
 			let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
 			nodes[0] = new_node_a;
@@ -1134,11 +1128,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 			if !chan_b_disconnected {
 				nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
 				chan_b_disconnected = true;
-				drain_msg_events_on_disconnect!(2);
-			}
-			if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
-				node_c_ser.0.clear();
-				nodes[2].write(&mut node_c_ser).unwrap();
+				push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+				bc_events.clear();
+				cb_events.clear();
 			}
 			let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
 			nodes[2] = new_node_c;
@@ -1306,13 +1298,10 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
 		node_a_ser.0.clear();
 		nodes[0].write(&mut node_a_ser).unwrap();
-		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
 		node_b_ser.0.clear();
 		nodes[1].write(&mut node_b_ser).unwrap();
-		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
 		node_c_ser.0.clear();
 		nodes[2].write(&mut node_c_ser).unwrap();
-		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
 	}
 }
 
-- 
2.30.2
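
For context on the deletion: `should_update_manager` implemented a
dirty-flag persistence pattern. The `watch_channel`/`update_channel`
paths set the flag, and the reload paths reserialized the
`ChannelManager` only when it was set; this is the "monitor persistence
introspection" the commit message refers to. The standalone Rust sketch
below contrasts that gated reserialization with the unconditional
per-iteration reserialization the fuzzer already performs at the end of
its main loop. The `Manager` type and its `write` method are
illustrative stand-ins, not LDK API.

use std::sync::atomic::{AtomicBool, Ordering};

// Illustrative stand-in for a node's serializable state; not an LDK type.
struct Manager { state: Vec<u8> }

impl Manager {
	// Mirrors the `Writeable::write` call the fuzzer uses to snapshot a node.
	fn write(&self, buf: &mut Vec<u8>) { buf.extend_from_slice(&self.state); }
}

fn main() {
	let manager = Manager { state: vec![1, 2, 3] };
	let mut ser: Vec<u8> = Vec::new();

	// Before this patch: monitor updates set a dirty flag, and the reload
	// paths reserialized the manager only if the flag was set.
	let should_update_manager = AtomicBool::new(true);
	if should_update_manager.load(Ordering::Relaxed) {
		ser.clear();
		manager.write(&mut ser);
		should_update_manager.store(false, Ordering::Relaxed);
	}

	// After this patch: the end of every fuzz iteration reserializes
	// unconditionally, so the flag (and reading it on reload) is unneeded.
	ser.clear();
	manager.write(&mut ser);
	assert_eq!(ser, vec![1, 2, 3]);
}

Since the serialization at the end of each iteration is already
unconditional, dropping the flag trades a few redundant writes for a
simpler fuzzer with no monitor-persistence introspection on the reload
paths.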