X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=fuzz%2Fsrc%2Fchanmon_consistency.rs;h=2a1b9e9a70a2fff9f4026879b26249ee79d89357;hb=0ce1c5a674f4ba27345634d529db5cdd758b8319;hp=83470bf8bc8e063075bce4a217b7de72fa9c1808;hpb=e9d9711de4ddc20b78eb110abfe400da6eef863d;p=rust-lightning

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 83470bf8..2a1b9e9a 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -125,7 +125,6 @@ struct TestChainMonitor {
 	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
 	// fully-serialized monitor state here, as well as the corresponding update_id.
 	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
-	pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChainMonitor {
 	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,18 +134,16 @@ impl TestChainMonitor {
 			keys,
 			persister,
 			latest_monitors: Mutex::new(HashMap::new()),
-			should_update_manager: atomic::AtomicBool::new(false),
 		}
 	}
 }
 impl chain::Watch<TestChannelSigner> for TestChainMonitor {
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
+	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
 		let mut ser = VecWriter(Vec::new());
 		monitor.write(&mut ser).unwrap();
 		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
 			panic!("Already had monitor pre-watch_channel");
 		}
-		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 		self.chain_monitor.watch_channel(funding_txo, monitor)
 	}
 
@@ -158,11 +155,10 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 		};
 		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
 			read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
-		deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
+		deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
-		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 		self.chain_monitor.update_channel(funding_txo, update)
 	}
 
@@ -375,8 +371,9 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p
 			channel_features: dest.channel_features(),
 			fee_msat: amt,
 			cltv_expiry_delta: 200,
+			maybe_announced_channel: true,
 		}], blinded_tail: None }],
-		payment_params: None,
+		route_params: None,
 	}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
 		check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
 		false
@@ -409,15 +406,17 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
 			channel_features: middle.channel_features(),
 			fee_msat: first_hop_fee,
 			cltv_expiry_delta: 100,
-		},RouteHop {
+			maybe_announced_channel: true,
+		}, RouteHop {
 			pubkey: dest.get_our_node_id(),
 			node_features: dest.node_features(),
 			short_channel_id: dest_chan_id,
 			channel_features: dest.channel_features(),
 			fee_msat: amt,
 			cltv_expiry_delta: 200,
+			maybe_announced_channel: true,
 		}], blinded_tail: None }],
-		payment_params: None,
+		route_params: None,
 	}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
 		let sent_amt = amt + first_hop_fee;
 		check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
@@ -501,7 +500,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 				let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
 				for (funding_txo, mon) in monitors.drain() {
 					assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
-						ChannelMonitorUpdateStatus::Completed);
+						Ok(ChannelMonitorUpdateStatus::Completed));
 				}
 				res
 			} }
@@ -1101,11 +1100,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 				if !chan_a_disconnected {
 					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
-					drain_msg_events_on_disconnect!(0);
-				}
-				if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
-					node_a_ser.0.clear();
-					nodes[0].write(&mut node_a_ser).unwrap();
+					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+					ab_events.clear();
+					ba_events.clear();
 				}
 				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
 				nodes[0] = new_node_a;
@@ -1134,11 +1131,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 				if !chan_b_disconnected {
 					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
 					chan_b_disconnected = true;
-					drain_msg_events_on_disconnect!(2);
-				}
-				if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
-					node_c_ser.0.clear();
-					nodes[2].write(&mut node_c_ser).unwrap();
+					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+					bc_events.clear();
+					cb_events.clear();
 				}
 				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
 				nodes[2] = new_node_c;
@@ -1304,15 +1299,18 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 			_ => test_return!(),
 		}
 
-		node_a_ser.0.clear();
-		nodes[0].write(&mut node_a_ser).unwrap();
-		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
-		node_b_ser.0.clear();
-		nodes[1].write(&mut node_b_ser).unwrap();
-		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
-		node_c_ser.0.clear();
-		nodes[2].write(&mut node_c_ser).unwrap();
-		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
+		if nodes[0].get_and_clear_needs_persistence() == true {
+			node_a_ser.0.clear();
+			nodes[0].write(&mut node_a_ser).unwrap();
+		}
+		if nodes[1].get_and_clear_needs_persistence() == true {
+			node_b_ser.0.clear();
+			nodes[1].write(&mut node_b_ser).unwrap();
+		}
+		if nodes[2].get_and_clear_needs_persistence() == true {
+			node_c_ser.0.clear();
+			nodes[2].write(&mut node_c_ser).unwrap();
+		}
 	}
 }
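
Reviewer's note on the pattern above: the diff drops the fuzzer's hand-rolled `should_update_manager` flag on `TestChainMonitor` and instead polls `ChannelManager::get_and_clear_needs_persistence()` at the end of each fuzz iteration, reserializing a node only when the manager itself reports a state change. Below is a minimal sketch of that polling pattern, assuming a hypothetical `Node` stub in place of the real `ChannelManager`; only the method names `get_and_clear_needs_persistence` and `write` mirror the API used in the diff.

	use std::sync::atomic::{AtomicBool, Ordering};

	/// Hypothetical stand-in for ChannelManager: it owns its own "dirty" bit.
	struct Node {
		needs_persistence: AtomicBool,
		state: Vec<u8>, // stand-in for the manager's serialized form
	}

	impl Node {
		/// Returns true at most once per state change, clearing the flag as it
		/// reads it, like ChannelManager::get_and_clear_needs_persistence.
		fn get_and_clear_needs_persistence(&self) -> bool {
			self.needs_persistence.swap(false, Ordering::AcqRel)
		}
		fn write(&self, buf: &mut Vec<u8>) {
			buf.extend_from_slice(&self.state);
		}
	}

	fn main() {
		let node = Node { needs_persistence: AtomicBool::new(true), state: vec![0xde, 0xad] };
		let mut node_ser: Vec<u8> = Vec::new();
		// End-of-iteration persistence, as in the new fuzzer code: only rewrite
		// the serialized copy when the node reports a change.
		if node.get_and_clear_needs_persistence() {
			node_ser.clear();
			node.write(&mut node_ser);
		}
		assert_eq!(node_ser, vec![0xde, 0xad]);
		// A second poll returns false, so an unchanged node is not reserialized.
		assert!(!node.get_and_clear_needs_persistence());
	}

The upshot is that the chain monitor no longer has to guess when the manager needs persisting; the manager tracks that itself, which also covers state changes that never touch a monitor. The same diff makes `watch_channel` return `Result<ChannelMonitorUpdateStatus, ()>`, which is why the reload macro now asserts `Ok(ChannelMonitorUpdateStatus::Completed)`.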