Merge pull request #875 from TheBlueMatt/2021-04-fix-bench
authorMatt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 14 Apr 2021 01:51:33 +0000 (01:51 +0000)
committerGitHub <noreply@github.com>
Wed, 14 Apr 2021 01:51:33 +0000 (01:51 +0000)
Fix benchmark compile warnings and errors

background-processor/src/lib.rs
lightning-persister/src/lib.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs

index 248870073658ecb2b9119bcda45e2872a427fe4a..3f11f4e381656a2529e2828e6913abda678f1688 100644 (file)
@@ -27,7 +27,8 @@ use std::time::{Duration, Instant};
 /// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
 ///   writing it to disk/backups by invoking the callback given to it at startup.
 ///   ChannelManager persistence should be done in the background.
-/// * Calling `ChannelManager::timer_chan_freshness_every_min()` every minute (can be done in the
+/// * Calling `ChannelManager::timer_tick_occurred()` and
+///   `PeerManager::timer_tick_occurred()` every minute (can be done in the
 ///   background).
 ///
 /// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
@@ -42,9 +43,9 @@ pub struct BackgroundProcessor {
 }
 
 #[cfg(not(test))]
-const CHAN_FRESHNESS_TIMER: u64 = 60;
+const FRESHNESS_TIMER: u64 = 60;
 #[cfg(test)]
-const CHAN_FRESHNESS_TIMER: u64 = 1;
+const FRESHNESS_TIMER: u64 = 1;
 
 impl BackgroundProcessor {
        /// Start a background thread that takes care of responsibilities enumerated in the top-level
@@ -101,9 +102,10 @@ impl BackgroundProcessor {
                                        log_trace!(logger, "Terminating background processor.");
                                        return Ok(());
                                }
-                               if current_time.elapsed().as_secs() > CHAN_FRESHNESS_TIMER {
-                                       log_trace!(logger, "Calling manager's timer_chan_freshness_every_min");
-                                       channel_manager.timer_chan_freshness_every_min();
+                               if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
+                                       log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
+                                       channel_manager.timer_tick_occurred();
+                                       peer_manager.timer_tick_occurred();
                                        current_time = Instant::now();
                                }
                        }
@@ -294,16 +296,16 @@ mod tests {
        }
 
        #[test]
-       fn test_chan_freshness_called() {
-               // Test that ChannelManager's `timer_chan_freshness_every_min` is called every
-               // `CHAN_FRESHNESS_TIMER`.
-               let nodes = create_nodes(1, "test_chan_freshness_called".to_string());
+       fn test_timer_tick_called() {
+               // Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
+               // `FRESHNESS_TIMER`.
+               let nodes = create_nodes(1, "test_timer_tick_called".to_string());
                let data_dir = nodes[0].persister.get_data_dir();
                let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
                let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
                loop {
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
-                       let desired_log = "Calling manager's timer_chan_freshness_every_min".to_string();
+                       let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
                        if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
                                break
                        }
index 368945d1cfb489db52752a1e4ec6b4afedb0497d..f6fbc635d974fe9e4503958704fa7ec2b10e27e0 100644 (file)
@@ -12,7 +12,7 @@ extern crate lightning;
 extern crate bitcoin;
 extern crate libc;
 
-use bitcoin::{BlockHash, Txid};
+use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::hashes::hex::{FromHex, ToHex};
 use crate::util::DiskWriteable;
 use lightning::chain;
@@ -24,12 +24,10 @@ use lightning::chain::transaction::OutPoint;
 use lightning::ln::channelmanager::ChannelManager;
 use lightning::util::logger::Logger;
 use lightning::util::ser::{ReadableArgs, Writeable};
-use std::collections::HashMap;
 use std::fs;
 use std::io::{Cursor, Error};
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
 
 /// FilesystemPersister persists channel data on disk, where each channel's
 /// data is stored in a file named after its funding outpoint.
@@ -53,12 +51,13 @@ impl<Signer: Sign> DiskWriteable for ChannelMonitor<Signer> {
        }
 }
 
-impl<Signer: Sign, M, T, K, F, L> DiskWriteable for ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
-where M: chain::Watch<Signer>,
-      T: BroadcasterInterface,
-      K: KeysInterface<Signer=Signer>,
-      F: FeeEstimator,
-      L: Logger,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> DiskWriteable for ChannelManager<Signer, M, T, K, F, L>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer=Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
 {
        fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> {
                self.write(writer)
@@ -87,16 +86,16 @@ impl FilesystemPersister {
 
        /// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister`
        /// initialization, within a file called "manager".
-       pub fn persist_manager<Signer, M, T, K, F, L>(
+       pub fn persist_manager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
                data_dir: String,
-               manager: &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
+               manager: &ChannelManager<Signer, M, T, K, F, L>
        ) -> Result<(), std::io::Error>
-       where Signer: Sign,
-             M: chain::Watch<Signer>,
-             T: BroadcasterInterface,
-             K: KeysInterface<Signer=Signer>,
-             F: FeeEstimator,
-             L: Logger
+       where
+               M::Target: chain::Watch<Signer>,
+               T::Target: BroadcasterInterface,
+               K::Target: KeysInterface<Signer=Signer>,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
        {
                let path = PathBuf::from(data_dir);
                util::write_to_file(path, "manager".to_string(), manager)
@@ -105,14 +104,14 @@ impl FilesystemPersister {
        /// Read `ChannelMonitor`s from disk.
        pub fn read_channelmonitors<Signer: Sign, K: Deref> (
                &self, keys_manager: K
-       ) -> Result<HashMap<OutPoint, (BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
-            where K::Target: KeysInterface<Signer=Signer> + Sized
+       ) -> Result<Vec<(BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
+               where K::Target: KeysInterface<Signer=Signer> + Sized,
        {
                let path = self.path_to_monitor_data();
                if !Path::new(&path).exists() {
-                       return Ok(HashMap::new());
+                       return Ok(Vec::new());
                }
-               let mut outpoint_to_channelmonitor = HashMap::new();
+               let mut res = Vec::new();
                for file_option in fs::read_dir(path).unwrap() {
                        let file = file_option.unwrap();
                        let owned_file_name = file.file_name();
@@ -144,10 +143,10 @@ impl FilesystemPersister {
                        let mut buffer = Cursor::new(&contents);
                        match <(BlockHash, ChannelMonitor<Signer>)>::read(&mut buffer, &*keys_manager) {
                                Ok((blockhash, channel_monitor)) => {
-                                       outpoint_to_channelmonitor.insert(
-                                               OutPoint { txid: txid.unwrap(), index: index.unwrap() },
-                                               (blockhash, channel_monitor),
-                                       );
+                                       if channel_monitor.get_funding_txo().0.txid != txid.unwrap() || channel_monitor.get_funding_txo().0.index != index.unwrap() {
+                                               return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "ChannelMonitor was stored in the wrong file"));
+                                       }
+                                       res.push((blockhash, channel_monitor));
                                }
                                Err(e) => return Err(std::io::Error::new(
                                        std::io::ErrorKind::InvalidData,
@@ -155,7 +154,7 @@ impl FilesystemPersister {
                                ))
                        }
                }
-               Ok(outpoint_to_channelmonitor)
+               Ok(res)
        }
 }
 
@@ -163,13 +162,13 @@ impl<ChannelSigner: Sign> channelmonitor::Persist<ChannelSigner> for FilesystemP
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
                let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
                util::write_to_file(self.path_to_monitor_data(), filename, monitor)
-                 .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
+                       .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
        }
 
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
                let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
                util::write_to_file(self.path_to_monitor_data(), filename, monitor)
-                 .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
+                       .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
        }
 }
 
@@ -227,21 +226,21 @@ mod tests {
                // Check that the persisted channel data is empty before any channels are
                // open.
                let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_0.keys().len(), 0);
+               assert_eq!(persisted_chan_data_0.len(), 0);
                let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_1.keys().len(), 0);
+               assert_eq!(persisted_chan_data_1.len(), 0);
 
                // Helper to make sure the channel is on the expected update ID.
                macro_rules! check_persisted_data {
                        ($expected_update_id: expr) => {
                                persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_0.keys().len(), 1);
-                               for (_, mon) in persisted_chan_data_0.values() {
+                               assert_eq!(persisted_chan_data_0.len(), 1);
+                               for (_, mon) in persisted_chan_data_0.iter() {
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
                                }
                                persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_1.keys().len(), 1);
-                               for (_, mon) in persisted_chan_data_1.values() {
+                               assert_eq!(persisted_chan_data_1.len(), 1);
+                               for (_, mon) in persisted_chan_data_1.iter() {
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
                                }
                        }
index c13ac9b61ffc94b000f4204ea23baf8a14a0e6aa..63f730781a6b6a56e71f10586d6286f193b3a38c 100644 (file)
@@ -250,7 +250,7 @@ pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 /// Liveness is called to fluctuate given peer disconnecton/monitor failures/closing.
 /// If channel is public, network should have a liveness view announced by us on a
 /// best-effort, which means we may filter out some status transitions to avoid spam.
-/// See further timer_chan_freshness_every_min.
+/// See further timer_tick_occurred.
 #[derive(PartialEq)]
 enum UpdateStatus {
        /// Status has been gossiped.
index ae8db0356964785e0d1f3947cfb1c10a823d8daf..d2ea7dfea798baea181dec19838cf2b55ccaad22 100644 (file)
@@ -338,7 +338,7 @@ pub(super) struct ChannelHolder<Signer: Sign> {
 }
 
 /// Events which we process internally but cannot be procsesed immediately at the generation site
-/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
        /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
@@ -403,7 +403,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
 /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
 /// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
 ///
 /// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
 /// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
@@ -1959,10 +1959,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                events.append(&mut new_events);
        }
 
-       /// Free the background events, generally called from timer_chan_freshness_every_min.
+       /// Free the background events, generally called from timer_tick_occurred.
        ///
        /// Exposed for testing to allow us to process events quickly without generating accidental
-       /// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+       /// BroadcastChannelUpdate events in timer_tick_occurred.
        ///
        /// Expects the caller to have a total_consistency_lock read lock.
        fn process_background_events(&self) {
@@ -1991,7 +1991,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// This method handles all the details, and must be called roughly once per minute.
        ///
        /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
-       pub fn timer_chan_freshness_every_min(&self) {
+       pub fn timer_tick_occurred(&self) {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
                self.process_background_events();
 
@@ -3274,7 +3274,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // We cannot broadcast our latest local state via monitor update (as
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
-                       // timer_chan_freshness_every_min, guaranteeing we're running normally.
+                       // timer_tick_occurred, guaranteeing we're running normally.
                        if let Some((funding_txo, update)) = failure.0.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
index 22c0af4070b1b2fe25444e631347f118914bc46a..2ab48831b2cb8256bdcf3247ccc9edab03573fc3 100644 (file)
@@ -7557,7 +7557,7 @@ fn test_check_htlc_underpaying() {
 
 #[test]
 fn test_announce_disable_channels() {
-       // Create 2 channels between A and B. Disconnect B. Call timer_chan_freshness_every_min and check for generated
+       // Create 2 channels between A and B. Disconnect B. Call timer_tick_occurred and check for generated
        // ChannelUpdate. Reconnect B, reestablish and check there is non-generated ChannelUpdate.
 
        let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -7573,8 +7573,8 @@ fn test_announce_disable_channels() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[0].node.timer_chan_freshness_every_min(); // dirty -> stagged
-       nodes[0].node.timer_chan_freshness_every_min(); // staged -> fresh
+       nodes[0].node.timer_tick_occurred(); // dirty -> staged
+       nodes[0].node.timer_tick_occurred(); // staged -> fresh
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
        for e in msg_events {
@@ -7613,7 +7613,7 @@ fn test_announce_disable_channels() {
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
        handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
-       nodes[0].node.timer_chan_freshness_every_min();
+       nodes[0].node.timer_tick_occurred();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }