use crate::hex_utils;
-use std::convert::TryInto;
use std::env;
use std::io::Cursor;
use std::net::{SocketAddr, ToSocketAddrs};
use lightning_block_sync::http::HttpEndpoint;
use tokio_postgres::Config;
-pub(crate) const SCHEMA_VERSION: i32 = 11;
-pub(crate) const SNAPSHOT_CALCULATION_INTERVAL: u32 = 3600 * 24; // every 24 hours, in seconds
+// Bumped to 13: migration steps 12 and 13 below drop two obsolete channel_updates indexes.
+pub(crate) const SCHEMA_VERSION: i32 = 13;
+// Snapshot symlinks are generated at this granularity; snapshot_generation_interval()
+// must return a positive multiple of this value (in seconds).
+pub(crate) const SYMLINK_GRANULARITY_INTERVAL: u32 = 3600 * 3; // three hours
+// NOTE(review): presumably bounds how far back a snapshot may reach — confirm
+// against the snapshot-generation code before relying on this description.
+pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
+// Symlinks are generated at SYMLINK_GRANULARITY_INTERVAL (3-hour) granularity; see above.
/// If the last update in either direction was more than six days ago, we send a reminder
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
+/// The number of successful peer connections to await prior to continuing to gossip storage.
+/// The application will still work if the number of specified peers is lower, as long as there is
+/// at least one successful peer connection, but it may result in long startup times.
+pub(crate) const CONNECTED_PEER_ASSERTION_LIMIT: usize = 5;
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
+/// Returns the snapshot generation interval in seconds.
+///
+/// Read from the `RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL` environment variable,
+/// defaulting to `SYMLINK_GRANULARITY_INTERVAL` when unset.
+///
+/// # Panics
+/// Panics if the value is not parseable as a `u32`, is zero, or is not a
+/// multiple of `SYMLINK_GRANULARITY_INTERVAL`.
+pub(crate) fn snapshot_generation_interval() -> u32 {
+	let interval = env::var("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL")
+		// unwrap_or_else: only build the default string when the variable is unset
+		// (unwrap_or would allocate it eagerly on every call).
+		.unwrap_or_else(|| SYMLINK_GRANULARITY_INTERVAL.to_string())
+		.parse::<u32>()
+		.expect("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL env variable must be a u32.");
+	assert!(interval > 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be positive");
+	assert_eq!(interval % SYMLINK_GRANULARITY_INTERVAL, 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be a multiple of {} (seconds)", SYMLINK_GRANULARITY_INTERVAL);
+	interval
+}
+
/// Selects the Bitcoin network from the `RAPID_GOSSIP_SYNC_SERVER_NETWORK`
/// environment variable (case-insensitive), defaulting to "bitcoin".
pub(crate) fn network() -> Network {
	let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
	// NOTE(review): the match arms are elided in this view; presumably they map
	// network names to Network variants — confirm against the full file.
	match network.as_str() {
	}
}
+/// Parses the desired log level from the `RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL`
+/// environment variable (case-insensitive), defaulting to `Info` when unset.
+///
+/// # Panics
+/// Panics if the variable is set to an unrecognized level name.
+pub(crate) fn log_level() -> lightning::util::logger::Level {
+	let level = env::var("RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL")
+		// unwrap_or_else: avoid allocating the default string when the var is set.
+		.unwrap_or_else(|| "info".to_string())
+		.to_lowercase();
+	match level.as_str() {
+		"gossip" => lightning::util::logger::Level::Gossip,
+		"trace" => lightning::util::logger::Level::Trace,
+		"debug" => lightning::util::logger::Level::Debug,
+		"info" => lightning::util::logger::Level::Info,
+		"warn" => lightning::util::logger::Level::Warn,
+		"error" => lightning::util::logger::Level::Error,
+		// Include the offending value so misconfiguration is easy to diagnose.
+		_ => panic!("Invalid log level: {}", level),
+	}
+}
+
/// Path of the serialized network graph cache file, located under `cache_path()`.
pub(crate) fn network_graph_cache_path() -> String {
	format!("{}/network_graph.bin", cache_path())
}
/// Builds the Postgres connection `Config` from environment variables.
///
/// Under `cfg!(test)` the variables use the `RAPID_GOSSIP_TEST_DB` prefix so a
/// test run cannot clobber the production database; otherwise the prefix is
/// `RAPID_GOSSIP_SYNC_SERVER_DB`. Defaults: host "localhost", user "alice",
/// dbname "ln_graph_sync"; the password is only set when the variable exists.
pub(crate) fn db_connection_config() -> Config {
	let mut config = Config::new();
-	let host = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_HOST").unwrap_or("localhost".to_string());
-	let user = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_USER").unwrap_or("alice".to_string());
-	let db = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_NAME").unwrap_or("ln_graph_sync".to_string());
+	let env_name_prefix = if cfg!(test) {
+		"RAPID_GOSSIP_TEST_DB"
+	} else {
+		"RAPID_GOSSIP_SYNC_SERVER_DB"
+	};
+
+	let host = env::var(format!("{}{}", env_name_prefix, "_HOST")).unwrap_or("localhost".to_string());
+	let user = env::var(format!("{}{}", env_name_prefix, "_USER")).unwrap_or("alice".to_string());
+	let db = env::var(format!("{}{}", env_name_prefix, "_NAME")).unwrap_or("ln_graph_sync".to_string());
	config.host(&host);
	config.user(&user);
	config.dbname(&db);
-	if let Ok(password) = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_PASSWORD") {
+	if let Ok(password) = env::var(format!("{}{}", env_name_prefix, "_PASSWORD")) {
		config.password(&password);
	}
	config
/// Idempotent index-creation SQL for channel_updates, run at startup.
/// (No comments inside the literal: any added text would change the SQL sent.)
pub(crate) fn db_index_creation_query() -> &'static str {
	"
-	CREATE INDEX IF NOT EXISTS channel_updates_seen_with_id_direction_blob ON channel_updates(seen) INCLUDE (id, direction, blob_signed);
	CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
	CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
+	CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
+	CREATE INDEX IF NOT EXISTS channel_updates_scid_asc_timestamp_desc ON channel_updates(short_channel_id ASC, timestamp DESC);
	"
}
				tx_ref.execute("UPDATE channel_updates SET short_channel_id = $1, direction = $2 WHERE id = $3", &[&scid, &direction, &id]).await.unwrap();
			});
		}
-		while let Some(_) = updates.next().await { }
+		// Drain the stream so every spawned row-rewrite task completes first.
+		while let Some(_) = updates.next().await {}
	}
	tx.execute("ALTER TABLE channel_updates ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
	tx.execute("ALTER TABLE channel_updates ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
				tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
			});
		}
-		while let Some(_) = updates.next().await { }
+		// Same drain pattern for the announcement backfill tasks.
+		while let Some(_) = updates.next().await {}
	}
	tx.execute("ALTER TABLE channel_announcements ADD CONSTRAINT channel_announcements_short_channel_id_key UNIQUE (short_channel_id)", &[]).await.unwrap();
	tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
	tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
	tx.commit().await.unwrap();
}
+// Schema 12: drop the wide covering index; db_index_creation_query() now creates
+// the narrower channel_updates_seen index in its place.
+if schema >= 1 && schema <= 11 {
+	let tx = client.transaction().await.unwrap();
+	tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
+	tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
+	tx.commit().await.unwrap();
+}
+// Schema 13: drop the timestamp-only index, superseded by
+// channel_updates_scid_asc_timestamp_desc in db_index_creation_query().
+if schema >= 1 && schema <= 12 {
+	let tx = client.transaction().await.unwrap();
+	tx.execute("DROP INDEX IF EXISTS channel_updates_timestamp_desc", &[]).await.unwrap();
+	tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
+	tx.commit().await.unwrap();
+}
// Anything below 2 or above SCHEMA_VERSION has no migration path from here.
if schema <= 1 || schema > SCHEMA_VERSION {
	panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
}
	// Default peer used when LN_PEERS is unset.
	const WALLET_OF_SATOSHI: &str = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
	let list = env::var("LN_PEERS").unwrap_or(WALLET_OF_SATOSHI.to_string());
	let mut peers = Vec::new();
-	for peer_info in list.split(',') {
-		peers.push(resolve_peer_info(peer_info).expect("Invalid peer info in LN_PEERS"));
+	// enumerate() so a malformed entry can be reported by its position in the list.
+	for (item, peer_info) in list.split(',').enumerate() {
+		// Ignore leading or trailing whitespace
+		let trimmed_peer_info = peer_info.trim();
+		// Ignore trailing or repeated commas
+		if !trimmed_peer_info.is_empty() {
+			peers.push(resolve_peer_info(trimmed_peer_info).unwrap_or_else(|_| {
+				panic!("Invalid peer info in LN_PEERS at item {}: {}", item, peer_info)
+			}));
+		}
	}
	peers
}
#[cfg(test)]
mod tests {
-	use super::resolve_peer_info;
-	use bitcoin::hashes::hex::ToHex;
+	use super::*;
+	use hex_conservative::DisplayHex;
+	use std::str::FromStr;
	#[test]
	fn test_resolve_peer_info() {
		let wallet_of_satoshi = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
		let (pubkey, socket_address) = resolve_peer_info(wallet_of_satoshi).unwrap();
-		assert_eq!(pubkey.serialize().to_hex(), "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226");
+		assert_eq!(
+			pubkey.serialize().to_lower_hex_string(),
+			"035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226"
+		);
		assert_eq!(socket_address.to_string(), "170.75.163.209:9735");
		let ipv6 = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@[2001:db8::1]:80";
		let (pubkey, socket_address) = resolve_peer_info(ipv6).unwrap();
-		assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
+		assert_eq!(
+			pubkey.serialize().to_lower_hex_string(),
+			"033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025"
+		);
		assert_eq!(socket_address.to_string(), "[2001:db8::1]:80");
		let localhost = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@localhost:9735";
		let (pubkey, socket_address) = resolve_peer_info(localhost).unwrap();
-		assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
+		assert_eq!(
+			pubkey.serialize().to_lower_hex_string(),
+			"033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025"
+		);
		// "localhost" may resolve to either the IPv4 or the IPv6 loopback address.
		let socket_address = socket_address.to_string();
		assert!(socket_address == "127.0.0.1:9735" || socket_address == "[::1]:9735");
	}
+
+	#[test]
+	fn test_ln_peers() {
+		// NOTE(review): set_var mutates process-wide state; if any other test ever
+		// reads LN_PEERS, these tests become order-dependent under the parallel
+		// test runner — consider serializing them.
+		// Set the environment variable, including a repeated comma, leading space, and trailing comma.
+		std::env::set_var("LN_PEERS", "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735,, 035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227@170.75.163.210:9735,");
+		let peers = ln_peers();
+
+		// Assert output is as expected
+		assert_eq!(
+			peers,
+			vec![
+				(
+					PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226").unwrap(),
+					SocketAddr::from_str("170.75.163.209:9735").unwrap()
+				),
+				(
+					PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227").unwrap(),
+					SocketAddr::from_str("170.75.163.210:9735").unwrap()
+				)
+			]
+		);
+	}
}