Merge pull request #1019 from jkczyz/2021-07-shutdown-pubkey
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 9 Aug 2021 21:41:02 +0000 (21:41 +0000)
committer GitHub <noreply@github.com>
Mon, 9 Aug 2021 21:41:02 +0000 (21:41 +0000)
Fetch shutdown script based on `commit_upfront_shutdown_pubkey`

lightning-background-processor/src/lib.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/peer_handler.rs

lightning-background-processor/src/lib.rs
index 7c1da8cd9d3a1178e00a98eb7bfcf9cb09b00246..e73ddeb709c11aca96ea04887c4eca2cb6cea388 100644 (file)
@@ -39,6 +39,7 @@ use std::ops::Deref;
 /// then there is a risk of channels force-closing on startup when the manager realizes it's
 /// outdated. However, as long as `ChannelMonitor` backups are sound, no funds besides those used
 /// for unilateral chain closure fees are at risk.
+#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
 pub struct BackgroundProcessor {
        stop_thread: Arc<AtomicBool>,
        thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
@@ -49,6 +50,8 @@ const FRESHNESS_TIMER: u64 = 60;
 #[cfg(test)]
 const FRESHNESS_TIMER: u64 = 1;
 
+const PING_TIMER: u64 = 5;
+
 /// Trait which handles persisting a [`ChannelManager`] to disk.
 ///
 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
@@ -137,7 +140,8 @@ impl BackgroundProcessor {
                let stop_thread = Arc::new(AtomicBool::new(false));
                let stop_thread_clone = stop_thread.clone();
                let handle = thread::spawn(move || -> Result<(), std::io::Error> {
-                       let mut current_time = Instant::now();
+                       let mut last_freshness_call = Instant::now();
+                       let mut last_ping_call = Instant::now();
                        loop {
                                peer_manager.process_events();
                                channel_manager.process_pending_events(&event_handler);
@@ -152,11 +156,27 @@ impl BackgroundProcessor {
                                        log_trace!(logger, "Terminating background processor.");
                                        return Ok(());
                                }
-                               if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
-                                       log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
+                               if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
+                                       log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
                                        channel_manager.timer_tick_occurred();
+                                       last_freshness_call = Instant::now();
+                               }
+                               if last_ping_call.elapsed().as_secs() > PING_TIMER * 2 {
+                                       // On various platforms, we may be starved of CPU cycles for several reasons.
+                                       // E.g. on iOS, if we've been in the background, we will be entirely paused.
+                                       // Similarly, if we're on a desktop platform and the device has been asleep, we
+                                       // may not get any cycles.
+                                       // In any case, if we've been entirely paused for more than double our ping
+                                       // timer, we should have disconnected all sockets by now (and they're probably
+                                       // dead anyway), so disconnect them by calling `timer_tick_occurred()` twice.
+                                       log_trace!(logger, "Awoke after more than double our ping timer, disconnecting peers.");
+                                       peer_manager.timer_tick_occurred();
+                                       peer_manager.timer_tick_occurred();
+                                       last_ping_call = Instant::now();
+                               } else if last_ping_call.elapsed().as_secs() > PING_TIMER {
+                                       log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
                                        peer_manager.timer_tick_occurred();
-                                       current_time = Instant::now();
+                                       last_ping_call = Instant::now();
                                }
                        }
                });
@@ -448,8 +468,10 @@ mod tests {
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
                loop {
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
-                       let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
-                       if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
+                       let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
+                       let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
+                       if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
+                                       log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
                                break
                        }
                }
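
The dual-timer loop added to lightning-background-processor/src/lib.rs above separates the once-a-minute ChannelManager freshness tick from the five-second PeerManager ping tick, and compensates for long pauses (iOS backgrounding, desktop sleep) by ticking the peer timer twice so stale sockets are disconnected. Below is a minimal sketch of that same pattern using only the standard library; run_timers, freshness_tick, and ping_tick are hypothetical stand-ins for the background-processor thread, ChannelManager::timer_tick_occurred, and PeerManager::timer_tick_occurred, not part of the crate's API.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Duration, Instant};

const FRESHNESS_TIMER: u64 = 60;
const PING_TIMER: u64 = 5;

/// Sketch of the dual-timer loop: polls the stop flag, ticks the freshness
/// timer roughly once a minute and the ping timer roughly every five seconds,
/// and double-ticks the ping timer after a long pause.
fn run_timers(stop: Arc<AtomicBool>, freshness_tick: impl Fn(), ping_tick: impl Fn()) {
    let mut last_freshness_call = Instant::now();
    let mut last_ping_call = Instant::now();
    while !stop.load(Ordering::Acquire) {
        std::thread::sleep(Duration::from_millis(100));
        if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
            freshness_tick();
            last_freshness_call = Instant::now();
        }
        if last_ping_call.elapsed().as_secs() > PING_TIMER * 2 {
            // Paused for more than double the ping timer (e.g. app backgrounded
            // or device asleep): tick twice so unresponsive peers are dropped.
            ping_tick();
            ping_tick();
            last_ping_call = Instant::now();
        } else if last_ping_call.elapsed().as_secs() > PING_TIMER {
            ping_tick();
            last_ping_call = Instant::now();
        }
    }
}
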
lightning/src/ln/channelmanager.rs
index dc2cc54b53de422942e9718762f8eb7f286a3e18..09383d40301b9f387cd00ec6060dffc5d6674374 100644 (file)
@@ -5052,6 +5052,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind of the monitor, close the channel:
+                                       log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+                                       log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
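
The error logs added to lightning/src/ln/channelmanager.rs above fire when a deserialized ChannelManager is behind the ChannelMonitor for one of its channels, as determined by comparing update ids. A hedged, self-contained sketch of that staleness check follows; StoredChannel, StoredMonitor, and their field names are illustrative stand-ins, not the crate's actual types.

/// Illustrative stand-ins for the deserialized channel and monitor state.
struct StoredChannel { latest_monitor_update_id: u64 }
struct StoredMonitor { latest_update_id: u64 }

/// Returns true when the manager's view of the channel is behind its monitor,
/// in which case the channel is force-closed and the monitor's latest holder
/// commitment transaction is broadcast, as in the hunk above.
fn manager_is_stale(channel: &StoredChannel, monitor: &StoredMonitor) -> bool {
    channel.latest_monitor_update_id < monitor.latest_update_id
}
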
lightning/src/ln/peer_handler.rs
index be12a32e99e5737ffa1a96236d269a917add4a14..42b2694111f1c27764a0dc78b150178d5cb14458 100644 (file)
@@ -1372,9 +1372,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                }
        }
 
-       /// This function should be called roughly once every 30 seconds.
-       /// It will send pings to each peer and disconnect those which did not respond to the last
-       /// round of pings.
+       /// Send pings to each peer and disconnect those which did not respond to the last round of
+       /// pings.
+       ///
+       /// This may be called on any timescale you want, however, roughly once every five to ten
+       /// seconds is preferred. The call rate determines both how often we send a ping to our peers
+       /// and how much time they have to respond before we disconnect them.
        ///
        /// May call [`send_data`] on all [`SocketDescriptor`]s. Thus, be very careful with reentrancy
        /// issues!
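
The revised documentation for PeerManager's timer_tick_occurred above replaces the fixed 30-second guidance with a caller-chosen cadence of roughly five to ten seconds, where the call rate also sets how long peers have to answer a ping. For users driving the timer themselves rather than through BackgroundProcessor, a hypothetical sketch of such a driver follows; drive_peer_timer is not a real API, and tick stands in for a call to peer_manager.timer_tick_occurred().

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

/// Calls `tick` every `interval_secs` seconds until `stop` is set. The
/// interval doubles as the ping deadline: peers that have not answered the
/// previous round's ping by the next call are disconnected.
fn drive_peer_timer(stop: Arc<AtomicBool>, interval_secs: u64, tick: impl Fn()) {
    while !stop.load(Ordering::Acquire) {
        std::thread::sleep(Duration::from_secs(interval_secs));
        tick();
    }
}

// Usage sketch, assuming `peer_manager` and `stop_flag` are already set up:
// std::thread::spawn(move || drive_peer_timer(stop_flag, 5, || peer_manager.timer_tick_occurred()));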