use std::io::BufReader;
use std::ops::Deref;
use std::sync::Arc;
-use std::time::SystemTime;
use bitcoin::blockdata::constants::ChainHash;
use lightning::log_info;
blob
}
-async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_calculation_time: Option<SystemTime>, logger: L) -> SerializedResponse where L::Target: Logger {
+async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> SerializedResponse where L::Target: Logger {
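+ // The optional `snapshot_reference_timestamp` is forwarded to the announcement lookup below, which treats it as "now" when computing the delta.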
let client = connect_to_db().await;
network_graph.remove_stale_channels_and_tracking();
};
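+ // Accumulates one delta entry per channel as the lookups below populate it.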
let mut delta_set = DeltaSet::new();
- lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, snapshot_calculation_time, logger.clone()).await;
+ lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
log_info!(logger, "announcement channel count: {}", delta_set.len());
lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
log_info!(logger, "update-fetched channel count: {}", delta_set.len());
/// whether they had been seen before.
/// Also include all announcements for which the first update was announced
/// after `last_sync_timestamp`
-pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaSet, network_graph: Arc<NetworkGraph<L>>, client: &Client, last_sync_timestamp: u32, snapshot_calculation_time: Option<SystemTime>, logger: L) where L::Target: Logger {
+pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaSet, network_graph: Arc<NetworkGraph<L>>, client: &Client, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) where L::Target: Logger {
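+ // A provided `snapshot_reference_timestamp` pins the reference "now"; otherwise the current system time is used below.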
log_info!(logger, "Obtaining channel ids from network graph");
let channel_ids = {
let read_only_graph = network_graph.read_only();
log_info!(logger, "Last sync timestamp: {}", last_sync_timestamp);
let last_sync_timestamp_float = last_sync_timestamp as f64;
- let current_time = snapshot_calculation_time.unwrap_or(SystemTime::now());
- let current_timestamp = current_time.duration_since(UNIX_EPOCH).unwrap().as_secs();
+ let current_timestamp = snapshot_reference_timestamp.unwrap_or(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs());
log_info!(logger, "Current timestamp: {}", current_timestamp);
let include_reminders = {
log_debug!(logger, "Current day index: {}", current_day);
log_debug!(logger, "Current hour: {}", current_hour);
- // anytime between 11pm and 1am
- let is_reminder_hour = current_hour < 2 || current_hour > 22;
+ // every 5th day at midnight
+ let is_reminder_hour = (current_hour % 24) == 0;
let is_reminder_day = (current_day % 5) == 0;
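+ // Number of seconds this delta spans, from the last sync up to the reference timestamp.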
let snapshot_scope = current_timestamp.saturating_sub(last_sync_timestamp as u64);
// Steps:
// — Obtain all updates, distinct by (scid, direction), ordered by seen DESC
// — From those updates, select distinct by (scid), ordered by seen ASC (to obtain the older of the two directions per channel)
- let reminder_threshold_timestamp = current_time.checked_sub(config::CHANNEL_REMINDER_AGE).unwrap().duration_since(UNIX_EPOCH).unwrap().as_secs() as f64;
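+ // Cutoff below which a channel's current values are considered old enough to warrant a reminder.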
+ let reminder_threshold_timestamp = current_timestamp.checked_sub(config::CHANNEL_REMINDER_AGE.as_secs()).unwrap() as f64;
log_info!(logger, "Fetch first time we saw the current value combination for each direction (prior mutations excepted)");
- let reminder_lookup_threshold_timestamp = current_time.checked_sub(config::CHANNEL_REMINDER_AGE * 3).unwrap().duration_since(UNIX_EPOCH).unwrap().as_secs() as f64;
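+ // Search window for the lookup below: three reminder ages back, to find when the currently announced values were first seen.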
+ let reminder_lookup_threshold_timestamp = current_timestamp.checked_sub(config::CHANNEL_REMINDER_AGE.as_secs() * 3).unwrap() as f64;
let params: [&(dyn tokio_postgres::types::ToSql + Sync); 2] = [&channel_ids, &reminder_lookup_threshold_timestamp];
// 1. get the current timestamp
let snapshot_generation_time = SystemTime::now();
let snapshot_generation_timestamp = snapshot_generation_time.duration_since(UNIX_EPOCH).unwrap().as_secs();
- let reference_timestamp = Self::round_down_to_nearest_multiple(snapshot_generation_timestamp, snapshot_interval as u64);
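+ // Round the generation time down to the snapshot interval; the rounded value serves as the delta reference and appears in the snapshot filenames below.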
+ let reference_timestamp = Self::round_down_to_nearest_multiple(snapshot_generation_timestamp, snapshot_interval);
log_info!(self.logger, "Capturing snapshots at {} for: {}", snapshot_generation_timestamp, reference_timestamp);
// 2. sleep until the next round interval
{
log_info!(self.logger, "Calculating {}-second snapshot", current_scope);
// calculate the snapshot
- let snapshot = super::serialize_delta(network_graph_clone, current_last_sync_timestamp.clone() as u32, Some(snapshot_generation_time), self.logger.clone()).await;
+ let snapshot = super::serialize_delta(network_graph_clone, current_last_sync_timestamp.clone() as u32, Some(reference_timestamp), self.logger.clone()).await;
// persist the snapshot and update the symlink
let snapshot_filename = format!("snapshot__calculated-at:{}__range:{}-scope__previous-sync:{}.lngossip", reference_timestamp, current_scope, current_last_sync_timestamp);