From: Valentine Wallace
Date: Fri, 9 Apr 2021 20:55:10 +0000 (-0400)
Subject: Rename timer_chan_freshness_every_min for uniformity with PeerManager
X-Git-Tag: v0.0.14~28^2~1
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=5b4c3c603c5913c73cb232ef17c18dcf9907df38;p=rust-lightning

Rename timer_chan_freshness_every_min for uniformity with PeerManager
---

diff --git a/background-processor/src/lib.rs b/background-processor/src/lib.rs
index 248870073..a670cf9cc 100644
--- a/background-processor/src/lib.rs
+++ b/background-processor/src/lib.rs
@@ -27,7 +27,7 @@ use std::time::{Duration, Instant};
 /// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
 ///   writing it to disk/backups by invoking the callback given to it at startup.
 ///   ChannelManager persistence should be done in the background.
-/// * Calling `ChannelManager::timer_chan_freshness_every_min()` every minute (can be done in the
+/// * Calling `ChannelManager::timer_tick_occurred()` every minute (can be done in the
 ///   background).
 ///
 /// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
@@ -102,8 +102,8 @@ impl BackgroundProcessor {
 			return Ok(());
 		}
 		if current_time.elapsed().as_secs() > CHAN_FRESHNESS_TIMER {
-			log_trace!(logger, "Calling manager's timer_chan_freshness_every_min");
-			channel_manager.timer_chan_freshness_every_min();
+			log_trace!(logger, "Calling manager's timer_tick_occurred");
+			channel_manager.timer_tick_occurred();
 			current_time = Instant::now();
 		}
 	}
@@ -294,8 +294,8 @@ mod tests {
 	}

 	#[test]
-	fn test_chan_freshness_called() {
-		// Test that ChannelManager's `timer_chan_freshness_every_min` is called every
+	fn test_timer_tick_called() {
+		// Test that ChannelManager's `timer_tick_occurred` is called every
 		// `CHAN_FRESHNESS_TIMER`.
 		let nodes = create_nodes(1, "test_chan_freshness_called".to_string());
 		let data_dir = nodes[0].persister.get_data_dir();
@@ -303,7 +303,7 @@ mod tests {
 		let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
 		loop {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
-			let desired_log = "Calling manager's timer_chan_freshness_every_min".to_string();
+			let desired_log = "Calling manager's timer_tick_occurred".to_string();
 			if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
 				break
 			}

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index c13ac9b61..63f730781 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -250,7 +250,7 @@ pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 /// Liveness is expected to fluctuate given peer disconnection/monitor failures/closing.
 /// If the channel is public, the network should have a liveness view announced by us on a
 /// best-effort basis, which means we may filter out some status transitions to avoid spam.
-/// See further timer_chan_freshness_every_min.
+/// See further timer_tick_occurred.
 #[derive(PartialEq)]
 enum UpdateStatus {
 	/// Status has been gossiped.

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index e87b22102..995aa822a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -338,7 +338,7 @@ pub(super) struct ChannelHolder {
 }

 /// Events which we process internally but cannot be processed immediately at the generation site
-/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
 	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
@@ -403,7 +403,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
 /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
 /// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
 ///
 /// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
 /// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
@@ -1959,10 +1959,10 @@ impl ChannelMana
 		events.append(&mut new_events);
 	}

-	/// Free the background events, generally called from timer_chan_freshness_every_min.
+	/// Free the background events, generally called from timer_tick_occurred.
 	///
 	/// Exposed for testing to allow us to process events quickly without generating accidental
-	/// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+	/// BroadcastChannelUpdate events in timer_tick_occurred.
 	///
 	/// Expects the caller to have a total_consistency_lock read lock.
 	fn process_background_events(&self) {
@@ -1991,7 +1991,7 @@ impl ChannelMana
 	/// This method handles all the details, and must be called roughly once per minute.
 	///
 	/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
-	pub fn timer_chan_freshness_every_min(&self) {
+	pub fn timer_tick_occurred(&self) {
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 		self.process_background_events();
@@ -3274,7 +3274,7 @@ impl ChannelMana
 			// We cannot broadcast our latest local state via monitor update (as
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
-			// timer_chan_freshness_every_min, guaranteeing we're running normally.
+			// timer_tick_occurred, guaranteeing we're running normally.
 			if let Some((funding_txo, update)) = failure.0.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 22c0af407..2ab48831b 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -7557,7 +7557,7 @@ fn test_check_htlc_underpaying() {

 #[test]
 fn test_announce_disable_channels() {
-	// Create 2 channels between A and B. Disconnect B. Call timer_chan_freshness_every_min and check for generated
+	// Create 2 channels between A and B. Disconnect B. Call timer_tick_occurred and check for generated
 	// ChannelUpdate. Reconnect B, reestablish and check that no ChannelUpdate is generated.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -7573,8 +7573,8 @@ fn test_announce_disable_channels() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

-	nodes[0].node.timer_chan_freshness_every_min(); // dirty -> staged
-	nodes[0].node.timer_chan_freshness_every_min(); // staged -> fresh
+	nodes[0].node.timer_tick_occurred(); // dirty -> staged
+	nodes[0].node.timer_tick_occurred(); // staged -> fresh

 	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 3);
 	for e in msg_events {
@@ -7613,7 +7613,7 @@ fn test_announce_disable_channels() {
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
 	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

-	nodes[0].node.timer_chan_freshness_every_min();
+	nodes[0].node.timer_tick_occurred();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }
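
Note on the renamed API's contract (not part of the patch): the docs above require
timer_tick_occurred to be called roughly once per minute, which BackgroundProcessor
does by checking CHAN_FRESHNESS_TIMER in its event loop. A minimal sketch of such a
driving loop for callers not using BackgroundProcessor follows; the TimerTick trait
is hypothetical, standing in for the concrete ChannelManager generics.

    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    // Hypothetical stand-in for the timer surface of
    // lightning::ln::channelmanager::ChannelManager.
    trait TimerTick {
        fn timer_tick_occurred(&self);
    }

    // Spawns a thread that fires the tick roughly once per minute. Per the
    // docs above, the cadence "doesn't have to be perfect", so sleep-based
    // timing is sufficient.
    fn spawn_minute_ticker<M: TimerTick + Send + Sync + 'static>(manager: Arc<M>) -> thread::JoinHandle<()> {
        thread::spawn(move || loop {
            thread::sleep(Duration::from_secs(60));
            manager.timer_tick_occurred();
        })
    }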