use lightning_block_sync::http::HttpEndpoint;
use tokio_postgres::Config;
/// Current database schema version. Bump this whenever the schema changes;
/// the migration code must provide an upgrade path from every prior version.
pub(crate) const SCHEMA_VERSION: i32 = 13;

// NOTE(review): `calculate_interval()` reads RAPID_GOSSIP_SYNC_CALC_INTERVAL
// with the same 86400-second default — confirm which of the two is the
// authoritative snapshot interval now that the env-driven getter exists.
pub(crate) const SNAPSHOT_CALCULATION_INTERVAL: u32 = 3600 * 24; // every 24 hours, in seconds

/// If the last update in either direction was more than six days ago, we send a reminder
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);

// Whether to fetch new gossip from peers; when false, presumably only
// already-stored gossip is served — TODO confirm against the P2P code.
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
/// Interval, in seconds, between snapshot recalculations.
///
/// Read from the `RAPID_GOSSIP_SYNC_CALC_INTERVAL` environment variable,
/// defaulting to 86400 (24 hours) when the variable is unset.
///
/// # Panics
/// Panics if the variable is set but does not parse as a `u32`.
pub(crate) fn calculate_interval() -> u32 {
	env::var("RAPID_GOSSIP_SYNC_CALC_INTERVAL")
		// `unwrap_or_else` avoids allocating the default string on the happy path
		.unwrap_or_else(|_| "86400".to_string())
		.parse::<u32>()
		.expect("RAPID_GOSSIP_SYNC_CALC_INTERVAL env variable must be a u32.")
}
/// Select the Bitcoin network to operate on, read from the
/// `RAPID_GOSSIP_SYNC_SERVER_NETWORK` environment variable (case-insensitive),
/// defaulting to "bitcoin" when the variable is unset.
pub(crate) fn network() -> Network {
	let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
	// NOTE(review): the match arms are not visible in this excerpt; an empty
	// match on `&str` would not compile, so arms mapping the lowercased name to
	// `Network` variants presumably exist — confirm against the full file.
	match network.as_str() {
	}
}
+pub(crate) fn log_level() -> lightning::util::logger::Level {
+ let level = env::var("RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL").unwrap_or("info".to_string()).to_lowercase();
+ match level.as_str() {
+ "gossip" => lightning::util::logger::Level::Gossip,
+ "trace" => lightning::util::logger::Level::Trace,
+ "debug" => lightning::util::logger::Level::Debug,
+ "info" => lightning::util::logger::Level::Info,
+ "warn" => lightning::util::logger::Level::Warn,
+ "error" => lightning::util::logger::Level::Error,
+ _ => panic!("Invalid log level"),
+ }
+}
+
/// Path of the serialized network graph inside the cache directory.
pub(crate) fn network_graph_cache_path() -> String {
	let directory = cache_path();
	format!("{}/network_graph.bin", directory)
}
/// SQL executed at startup to (idempotently) create the indexes the query
/// paths rely on. The former `channel_updates_seen_with_id_direction_blob`
/// index is intentionally absent: it is dropped by the schema-12 migration.
pub(crate) fn db_index_creation_query() -> &'static str {
	"
	CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
	CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
	CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
	CREATE INDEX IF NOT EXISTS channel_updates_timestamp_desc ON channel_updates(timestamp DESC);
	"
}
tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
}
+ if schema >= 1 && schema <= 11 {
+ let tx = client.transaction().await.unwrap();
+ tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
+ tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
+ tx.commit().await.unwrap();
+ }
if schema <= 1 || schema > SCHEMA_VERSION {
panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
}