Rework `chain::Watch` return types to make async updates less scary
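
As context for the fuzzer changes below: `chain::Watch::watch_channel` and
`update_channel` now return a plain `ChannelMonitorUpdateStatus` instead of
`Result<(), ChannelMonitorUpdateErr>`, so a persist that is still in flight
is reported as `InProgress` rather than as a `TemporaryFailure` "error".
A minimal sketch of that shape change follows; it uses local stand-in enums
that only mirror the two variants visible in this diff (`Completed`,
`InProgress`), not the real `lightning::chain` definitions:

    // Stand-in types for illustration only; the real ones live in
    // `lightning::chain` / `lightning::chain::channelmonitor`.
    #[derive(Debug, PartialEq)]
    enum OldChannelMonitorUpdateErr { TemporaryFailure }

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum ChannelMonitorUpdateStatus { Completed, InProgress }

    // Before: a pending async persist had to be spelled as an "error".
    fn update_channel_old(persist_pending: bool) -> Result<(), OldChannelMonitorUpdateErr> {
        if persist_pending {
            Err(OldChannelMonitorUpdateErr::TemporaryFailure)
        } else {
            Ok(())
        }
    }

    // After: the same situation is just a status, with no error plumbing.
    fn update_channel_new(persist_pending: bool) -> ChannelMonitorUpdateStatus {
        if persist_pending {
            ChannelMonitorUpdateStatus::InProgress
        } else {
            ChannelMonitorUpdateStatus::Completed
        }
    }

    fn main() {
        assert_eq!(update_channel_old(true), Err(OldChannelMonitorUpdateErr::TemporaryFailure));
        assert_eq!(update_channel_new(true), ChannelMonitorUpdateStatus::InProgress);
        assert_eq!(update_channel_new(false), ChannelMonitorUpdateStatus::Completed);
    }
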
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 372bed6049370c065c0d7a660f61a466ec715bd4..7944d04ad1c33d1b4de0aed044f429df9f1c7860 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -32,15 +32,14 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::keysinterface::{KeyMaterial, KeysInterface, InMemorySigner, Recipient};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
+use lightning::ln::channelmanager::{self, ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::ln::script::ShutdownScript;
 use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
@@ -125,7 +124,7 @@ impl TestChainMonitor {
        }
 }
 impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
                let mut ser = VecWriter(Vec::new());
                monitor.write(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
@@ -135,7 +134,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                self.chain_monitor.watch_channel(funding_txo, monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
                let mut map_lock = self.latest_monitors.lock().unwrap();
                let mut map_entry = match map_lock.entry(funding_txo) {
                        hash_map::Entry::Occupied(entry) => entry,
@@ -315,9 +314,9 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p
        if let Err(err) = source.send_payment(&Route {
                paths: vec![vec![RouteHop {
                        pubkey: dest.get_our_node_id(),
-                       node_features: NodeFeatures::known(),
+                       node_features: channelmanager::provided_node_features(),
                        short_channel_id: dest_chan_id,
-                       channel_features: ChannelFeatures::known(),
+                       channel_features: channelmanager::provided_channel_features(),
                        fee_msat: amt,
                        cltv_expiry_delta: 200,
                }]],
@@ -334,16 +333,16 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
        if let Err(err) = source.send_payment(&Route {
                paths: vec![vec![RouteHop {
                        pubkey: middle.get_our_node_id(),
-                       node_features: NodeFeatures::known(),
+                       node_features: channelmanager::provided_node_features(),
                        short_channel_id: middle_chan_id,
-                       channel_features: ChannelFeatures::known(),
+                       channel_features: channelmanager::provided_channel_features(),
                        fee_msat: 50000,
                        cltv_expiry_delta: 100,
                },RouteHop {
                        pubkey: dest.get_our_node_id(),
-                       node_features: NodeFeatures::known(),
+                       node_features: channelmanager::provided_node_features(),
                        short_channel_id: dest_chan_id,
-                       channel_features: ChannelFeatures::known(),
+                       channel_features: channelmanager::provided_channel_features(),
                        fee_msat: amt,
                        cltv_expiry_delta: 200,
                }]],
@@ -364,7 +363,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
                        let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
-                               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
+                               Arc::new(TestPersister {
+                                       update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+                               }), Arc::clone(&keys_manager)));
 
                        let mut config = UserConfig::default();
                        config.channel_config.forwarding_fee_proportional_millionths = 0;
@@ -384,7 +385,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                    let keys_manager = Arc::clone(& $keys_manager);
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
                        let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
-                               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
+                               Arc::new(TestPersister {
+                                       update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+                               }), Arc::clone(& $keys_manager)));
 
                        let mut config = UserConfig::default();
                        config.channel_config.forwarding_fee_proportional_millionths = 0;
@@ -413,7 +416,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                        let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
                        for (funding_txo, mon) in monitors.drain() {
-                               assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+                               assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
+                                       ChannelMonitorUpdateStatus::Completed);
                        }
                        res
                } }
@@ -422,8 +426,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
        let mut channel_txn = Vec::new();
        macro_rules! make_channel {
                ($source: expr, $dest: expr, $chan_id: expr) => { {
-                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
-                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
 
                        $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
                        let open_channel = {
@@ -434,7 +438,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                } else { panic!("Wrong event type"); }
                        };
 
-                       $dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::known(), &open_channel);
+                       $dest.handle_open_channel(&$source.get_our_node_id(), channelmanager::provided_init_features(), &open_channel);
                        let accept_channel = {
                                let events = $dest.get_and_clear_pending_msg_events();
                                assert_eq!(events.len(), 1);
@@ -443,7 +447,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                } else { panic!("Wrong event type"); }
                        };
 
-                       $source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::known(), &accept_channel);
+                       $source.handle_accept_channel(&$dest.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel);
                        let funding_output;
                        {
                                let events = $source.get_and_clear_pending_events();
@@ -890,12 +894,12 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        // bit-twiddling mutations to have similar effects. This is probably overkill, but no
                        // harm in doing so.
 
-                       0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
-                       0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
-                       0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
+                       0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+                       0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+                       0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
 
                        0x08 => {
                                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
@@ -940,15 +944,15 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        },
                        0x0e => {
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
                                        chan_a_disconnected = false;
                                }
                        },
                        0x0f => {
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
                                        chan_b_disconnected = false;
                                }
                        },
@@ -1120,9 +1124,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                // after we resolve all pending events.
                                // First make sure there are no pending monitor updates, resetting the error state
                                // and calling force_channel_monitor_updated for each monitor.
-                               *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
-                               *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
-                               *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
+                               *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+                               *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+                               *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
 
                                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
                                        monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
@@ -1143,13 +1147,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                                // Next, make sure peers are all connected to each other
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
                                        chan_a_disconnected = false;
                                }
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
                                        chan_b_disconnected = false;
                                }
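
A note on the persister plumbing above, as a minimal sketch only: the
fuzzer's shared `update_ret` slot now stores a `ChannelMonitorUpdateStatus`
instead of a `Result`, and the 0x00-0x06 input bytes flip it between
`InProgress` and `Completed` before `force_channel_monitor_updated` is
eventually invoked. The types below are local stand-ins that mirror the
shape of the fuzzer's `TestPersister`, not the real test-utility
definitions:

    use std::sync::Mutex;

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum ChannelMonitorUpdateStatus { Completed, InProgress } // stand-in

    // Mirrors the fuzzer's TestPersister field: one programmable return slot.
    struct TestPersister { update_ret: Mutex<ChannelMonitorUpdateStatus> }

    impl TestPersister {
        // Every monitor persist simply reports whatever was last programmed.
        fn persist_status(&self) -> ChannelMonitorUpdateStatus {
            *self.update_ret.lock().unwrap()
        }
    }

    fn main() {
        let p = TestPersister { update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) };
        // A 0x00-style fuzz byte marks monitor updates as still in flight...
        *p.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress;
        assert_eq!(p.persist_status(), ChannelMonitorUpdateStatus::InProgress);
        // ...and a 0x04-style byte lets them complete again.
        *p.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
        assert_eq!(p.persist_status(), ChannelMonitorUpdateStatus::Completed);
    }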