| RAPID_GOSSIP_SYNC_SERVER_DB_PASSWORD | _None_ | Password to access Postgres |
| RAPID_GOSSIP_SYNC_SERVER_DB_NAME | ln_graph_sync | Name of the database to be used for gossip storage |
| RAPID_GOSSIP_SYNC_SERVER_NETWORK | mainnet | Network to operate in. Possible values are mainnet, testnet, signet, regtest |
+| RAPID_GOSSIP_SYNC_CALC_INTERVAL | 86400 | The interval, in seconds, at which RGS creates a new snapshot |
| BITCOIN_REST_DOMAIN | 127.0.0.1 | Domain of the [bitcoind REST server](https://github.com/bitcoin/bitcoin/blob/master/doc/REST-interface.md) |
| BITCOIN_REST_PORT | 8332 | HTTP port of the bitcoind REST server |
| BITCOIN_REST_PATH | /rest/ | Path infix to access the bitcoind REST endpoints |
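The three `BITCOIN_REST_*` values are combined into a single REST endpoint for fetching block data. As a rough illustration, here is a minimal sketch of that composition, assuming lightning-block-sync's `HttpEndpoint` builder; the function name and structure are illustrative rather than taken verbatim from the server:

```rust
use std::env;
use lightning_block_sync::http::HttpEndpoint;

// Illustrative helper: build the bitcoind REST endpoint from the environment,
// falling back to the documented defaults.
pub(crate) fn bitcoin_rest_endpoint() -> HttpEndpoint {
	let host = env::var("BITCOIN_REST_DOMAIN").unwrap_or_else(|_| "127.0.0.1".to_string());
	let port = env::var("BITCOIN_REST_PORT")
		.unwrap_or_else(|_| "8332".to_string())
		.parse::<u16>()
		.expect("BITCOIN_REST_PORT env variable must be a u16.");
	let path = env::var("BITCOIN_REST_PATH").unwrap_or_else(|_| "/rest/".to_string());
	HttpEndpoint::for_host(host).with_port(port).with_path(path)
}
```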
The snapshotting module is responsible for calculating and storing snapshots. It's started up
as soon as the first full graph sync completes, and then keeps updating the snapshots at a
-24-hour-interval.
+configurable interval, which defaults to 24 hours.
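To make the relationship between the configured interval and snapshot boundaries concrete, here is a small, self-contained sketch. The rounding helper mirrors the snapshotter's `round_down_to_nearest_multiple`, and the environment handling mirrors `calculate_interval()`; the surrounding `main` is purely illustrative:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Round a UNIX timestamp down to the nearest multiple of `interval`,
// mirroring how the snapshotter aligns snapshot boundaries.
fn round_down_to_nearest_multiple(timestamp: u64, interval: u64) -> u64 {
	timestamp - (timestamp % interval)
}

fn main() {
	// Stand-in for config::calculate_interval(): read the interval from the
	// environment, defaulting to 24 hours (86400 seconds).
	let interval: u64 = std::env::var("RAPID_GOSSIP_SYNC_CALC_INTERVAL")
		.unwrap_or_else(|_| "86400".to_string())
		.parse()
		.expect("RAPID_GOSSIP_SYNC_CALC_INTERVAL must be an integer number of seconds");

	let now = SystemTime::now()
		.duration_since(UNIX_EPOCH)
		.expect("system time is after the UNIX epoch")
		.as_secs();

	// The current snapshot reference timestamp, and the delay until the next boundary.
	let reference_timestamp = round_down_to_nearest_multiple(now, interval);
	let seconds_until_next_snapshot = reference_timestamp + interval - now;
	println!(
		"reference timestamp: {}, next snapshot in {} seconds",
		reference_timestamp, seconds_until_next_snapshot
	);
}
```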
### lookup
use lightning_block_sync::http::HttpEndpoint;
use tokio_postgres::Config;
-pub(crate) const SCHEMA_VERSION: i32 = 12;
+pub(crate) const SCHEMA_VERSION: i32 = 13;
pub(crate) const SNAPSHOT_CALCULATION_INTERVAL: u32 = 3600 * 24; // every 24 hours, in seconds
/// If the last update in either direction was more than six days ago, we send a reminder.
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
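+/// Returns the snapshot calculation interval in seconds, read from the
+/// RAPID_GOSSIP_SYNC_CALC_INTERVAL environment variable (default: 86400, i.e. 24 hours).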
+pub(crate) fn calculate_interval() -> u32 {
+	env::var("RAPID_GOSSIP_SYNC_CALC_INTERVAL")
+		.unwrap_or_else(|_| "86400".to_string())
+		.parse::<u32>()
+		.expect("RAPID_GOSSIP_SYNC_CALC_INTERVAL env variable must be a u32.")
+}
+
pub(crate) fn network() -> Network {
let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
match network.as_str() {
let mut blob = GOSSIP_PREFIX.to_vec();
let network = config::network();
+ let calc_interval = config::calculate_interval();
let genesis_block = bitcoin::blockdata::constants::genesis_block(network);
let chain_hash = genesis_block.block_hash();
chain_hash.write(&mut blob).unwrap();
- let blob_timestamp = Snapshotter::<Arc<RGSSLogger>>::round_down_to_nearest_multiple(current_timestamp, config::SNAPSHOT_CALCULATION_INTERVAL as u64) as u32;
+ let blob_timestamp = Snapshotter::<Arc<RGSSLogger>>::round_down_to_nearest_multiple(current_timestamp, calc_interval as u64) as u32;
blob_timestamp.write(&mut blob).unwrap();
0u32.write(&mut blob).unwrap(); // node count
network_graph.remove_stale_channels_and_tracking();
let mut output: Vec<u8> = vec![];
+ let calc_interval = config::calculate_interval();
// set a flag if the chain hash is prepended
// chain hash only necessary if either channel announcements or non-incremental updates are present
serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
// always write the latest seen timestamp
let latest_seen_timestamp = serialization_details.latest_seen;
- let overflow_seconds = latest_seen_timestamp % config::SNAPSHOT_CALCULATION_INTERVAL;
+ let overflow_seconds = latest_seen_timestamp % calc_interval;
let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
serialized_seen_timestamp.write(&mut prefixed_output).unwrap();
pub(crate) async fn snapshot_gossip(&self) {
log_info!(self.logger, "Initiating snapshotting service");
+ let calc_interval = config::calculate_interval();
let snapshot_sync_day_factors = [1, 2, 3, 4, 5, 6, 7, 14, 21, u64::MAX];
const DAY_SECONDS: u64 = 60 * 60 * 24;
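+ // Configured snapshot interval, in seconds, as a u64 for timestamp arithmetic.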
+ let round_day_seconds = calc_interval as u64;
let pending_snapshot_directory = format!("{}/snapshots_pending", cache_path());
let pending_symlink_directory = format!("{}/symlinks_pending", cache_path());
-let reference_timestamp = Self::round_down_to_nearest_multiple(snapshot_generation_timestamp, config::SNAPSHOT_CALCULATION_INTERVAL as u64);
+let reference_timestamp = Self::round_down_to_nearest_multiple(snapshot_generation_timestamp, calc_interval as u64);
log_info!(self.logger, "Capturing snapshots at {} for: {}", snapshot_generation_timestamp, reference_timestamp);
- // 2. sleep until the next round 24 hours
+ // 2. sleep until the next interval boundary
// 3. refresh all snapshots
// the stored snapshots should adhere to the following format