/// then there is a risk of channels force-closing on startup when the manager realizes it's
/// outdated. However, as long as `ChannelMonitor` backups are sound, no funds besides those used
/// for unilateral chain closure fees are at risk.
+#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
stop_thread: Arc<AtomicBool>,
thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;
+const PING_TIMER: u64 = 5;
+
/// Trait which handles persisting a [`ChannelManager`] to disk.
///
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
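// Illustrative sketch only: the trait definition itself is elided from this diff, and
// `persist_to_disk` below is a hypothetical helper rather than this crate's API. A
// disk-backed persister would typically serialize the ChannelManager and write it out
// atomically, e.g. write to a temporary file first and rename it over the old copy so a
// crash mid-write never leaves a truncated manager behind.
fn persist_to_disk(path: &std::path::Path, serialized_manager: &[u8]) -> Result<(), std::io::Error> {
	// Write the new bytes next to the destination, then atomically swap them into place.
	let tmp_path = path.with_extension("tmp");
	std::fs::write(&tmp_path, serialized_manager)?;
	std::fs::rename(&tmp_path, path)
}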
let stop_thread = Arc::new(AtomicBool::new(false));
let stop_thread_clone = stop_thread.clone();
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
- let mut current_time = Instant::now();
+ let mut last_freshness_call = Instant::now();
+ let mut last_ping_call = Instant::now();
loop {
peer_manager.process_events();
channel_manager.process_pending_events(&event_handler);
log_trace!(logger, "Terminating background processor.");
return Ok(());
}
- if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
- log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
+ if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
+ log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
channel_manager.timer_tick_occurred();
+ last_freshness_call = Instant::now();
+ }
+ if last_ping_call.elapsed().as_secs() > PING_TIMER * 2 {
+ // On various platforms, we may be starved of CPU cycles for several reasons.
+ // E.g. on iOS, if we've been in the background, we will be entirely paused.
+ // Similarly, if we're on a desktop platform and the device has been asleep, we
+ // may not get any cycles.
+ // In any case, if we've been entirely paused for more than double our ping
+ // timer, we should have disconnected all sockets by now (and they're probably
+ // dead anyway), so disconnect them by calling `timer_tick_occurred()` twice.
+ log_trace!(logger, "Awoke after more than double our ping timer, disconnecting peers.");
+ peer_manager.timer_tick_occurred();
+ peer_manager.timer_tick_occurred();
+ last_ping_call = Instant::now();
+ } else if last_ping_call.elapsed().as_secs() > PING_TIMER {
+ log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
peer_manager.timer_tick_occurred();
- current_time = Instant::now();
+ last_ping_call = Instant::now();
}
}
});
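// Usage sketch (illustrative, not part of this diff): per the `must_use` note above, the
// BackgroundProcessor returned by `start()` should be kept alive for the node's lifetime
// and stopped explicitly at shutdown. `stop()` is assumed here to set the stop flag and
// join the background thread, surfacing any I/O error the persister hit:
//
// let bg_processor = BackgroundProcessor::start(persister, event_handler, chain_monitor,
// 	channel_manager, peer_manager, logger);
// // ... node runs; keep `bg_processor` in scope the whole time ...
// bg_processor.stop().expect("background processor failed to persist on shutdown");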
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
- let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
- if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
+ let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
+ let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
+ if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
+ log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
break
}
}
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind the monitor, close the channel:
+ log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+ log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);