Ignore trailing commas when parsing peers, and log more on error.
[rapid-gossip-sync-server] / src / config.rs
index a1289263ce155978246e469c743e924d36006aad..dd078904916fc7787e59f87c1b2f6b65d5a8d72a 100644 (file)
@@ -1,33 +1,92 @@
+use crate::hex_utils;
+
 use std::convert::TryInto;
 use std::env;
-use std::net::SocketAddr;
 use std::io::Cursor;
+use std::net::{SocketAddr, ToSocketAddrs};
+use std::time::Duration;
+
+use bitcoin::Network;
+use bitcoin::hashes::hex::FromHex;
 use bitcoin::secp256k1::PublicKey;
+use futures::stream::{FuturesUnordered, StreamExt};
 use lightning::ln::msgs::ChannelAnnouncement;
 use lightning::util::ser::Readable;
 use lightning_block_sync::http::HttpEndpoint;
 use tokio_postgres::Config;
-use crate::hex_utils;
-
-use futures::stream::{FuturesUnordered, StreamExt};
 
/// Current database schema version; `upgrade_db` migrates any older on-disk schema up to this.
pub(crate) const SCHEMA_VERSION: i32 = 13;
/// Snapshot symlinks are generated on a three-hour granularity.
pub(crate) const SYMLINK_GRANULARITY_INTERVAL: u32 = 3600 * 3; // three hours
// NOTE(review): appears to cap the age range a snapshot may cover — confirm against the
// snapshot generation code.
pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
/// If the last update in either direction was more than six days ago, we send a reminder.
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
/// The number of successful peer connections to await prior to continuing to gossip storage.
/// The application will still work if the number of specified peers is lower, as long as there is
/// at least one successful peer connection, but it may result in long startup times.
pub(crate) const CONNECTED_PEER_ASSERTION_LIMIT: usize = 5;
/// Whether to fetch fresh gossip from peers (as opposed to serving only stored data).
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
 
-pub(crate) fn network_graph_cache_path() -> &'static str {
-       "./res/network_graph.bin"
+pub(crate) fn snapshot_generation_interval() -> u32 {
+       let interval = env::var("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL").unwrap_or(SYMLINK_GRANULARITY_INTERVAL.to_string())
+               .parse::<u32>()
+               .expect("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL env variable must be a u32.");
+       assert!(interval > 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be positive");
+       assert_eq!(interval % SYMLINK_GRANULARITY_INTERVAL, 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be a multiple of {} (seconds)", SYMLINK_GRANULARITY_INTERVAL);
+       interval
+}
+
+pub(crate) fn network() -> Network {
+       let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
+       match network.as_str() {
+               "mainnet" => Network::Bitcoin,
+               "bitcoin" => Network::Bitcoin,
+               "testnet" => Network::Testnet,
+               "signet" => Network::Signet,
+               "regtest" => Network::Regtest,
+               _ => panic!("Invalid network"),
+       }
+}
+
+pub(crate) fn log_level() -> lightning::util::logger::Level {
+       let level = env::var("RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL").unwrap_or("info".to_string()).to_lowercase();
+       match level.as_str() {
+               "gossip" => lightning::util::logger::Level::Gossip,
+               "trace" => lightning::util::logger::Level::Trace,
+               "debug" => lightning::util::logger::Level::Debug,
+               "info" => lightning::util::logger::Level::Info,
+               "warn" => lightning::util::logger::Level::Warn,
+               "error" => lightning::util::logger::Level::Error,
+               _ => panic!("Invalid log level"),
+       }
+}
+
+pub(crate) fn network_graph_cache_path() -> String {
+       format!("{}/network_graph.bin", cache_path())
+}
+
/// Directory used for cached data (e.g. the serialized network graph), configurable via
/// `RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH` and defaulting to `./res`.
pub(crate) fn cache_path() -> String {
	// Note: previously the configured path was lowercased (likely copy-pasted from the
	// case-insensitive env-value handling elsewhere in this file). Filesystem paths are
	// case-sensitive on most platforms, so the configured value is now used verbatim.
	env::var("RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH").unwrap_or_else(|_| "./res".to_string())
}
 
 pub(crate) fn db_connection_config() -> Config {
        let mut config = Config::new();
-       let host = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_HOST").unwrap_or("localhost".to_string());
-       let user = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_USER").unwrap_or("alice".to_string());
-       let db = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_NAME").unwrap_or("ln_graph_sync".to_string());
+       let env_name_prefix = if cfg!(test) {
+               "RAPID_GOSSIP_TEST_DB"
+       } else {
+               "RAPID_GOSSIP_SYNC_SERVER_DB"
+       };
+
+       let host = env::var(format!("{}{}", env_name_prefix, "_HOST")).unwrap_or("localhost".to_string());
+       let user = env::var(format!("{}{}", env_name_prefix, "_USER")).unwrap_or("alice".to_string());
+       let db = env::var(format!("{}{}", env_name_prefix, "_NAME")).unwrap_or("ln_graph_sync".to_string());
        config.host(&host);
        config.user(&user);
        config.dbname(&db);
-       if let Ok(password) = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_PASSWORD") {
+       if let Ok(password) = env::var(format!("{}{}", env_name_prefix, "_PASSWORD")) {
                config.password(&password);
        }
        config
/// Returns the `CREATE TABLE` statement for `channel_announcements`: one row per
/// short channel id (enforced by the UNIQUE constraint), storing the raw signed
/// announcement and the time it was first seen.
pub(crate) fn db_announcement_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_announcements (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL UNIQUE,
		announcement_signed BYTEA,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
 
/// Returns the `CREATE TABLE` statement for `channel_updates`, storing the parsed
/// update fields alongside the raw signed message (`blob_signed`) and the time each
/// row was first seen. Uniqueness over (`short_channel_id`, `direction`, `timestamp`)
/// is enforced by the `channel_updates_key` index created in `db_index_creation_query`.
pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_updates (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL,
		timestamp bigint NOT NULL,
		channel_flags smallint NOT NULL,
		direction boolean NOT NULL,
		disable boolean NOT NULL,
		cltv_expiry_delta integer NOT NULL,
		htlc_minimum_msat bigint NOT NULL,
		fee_base_msat integer NOT NULL,
		fee_proportional_millionths integer NOT NULL,
		htlc_maximum_msat bigint NOT NULL,
		blob_signed BYTEA NOT NULL,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
 
/// Returns the index-creation statements for `channel_updates`.
///
/// `channel_updates_key` is UNIQUE over (`short_channel_id`, `direction`, `timestamp`)
/// and therefore also deduplicates updates. The remaining indexes presumably serve
/// specific query patterns elsewhere in the crate — NOTE(review): confirm each is still
/// matched by a live query before pruning any of them.
pub(crate) fn db_index_creation_query() -> &'static str {
	"
	CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
	CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
	CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_asc_timestamp_desc ON channel_updates(short_channel_id ASC, timestamp DESC);
	"
}
 
@@ -124,10 +182,8 @@ pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client)
                                        tx_ref.execute("UPDATE channel_updates SET short_channel_id = $1, direction = $2 WHERE id = $3", &[&scid, &direction, &id]).await.unwrap();
                                });
                        }
-                       while let Some(_) = updates.next().await { }
+                       while let Some(_) = updates.next().await {}
                }
-               tx.execute("CREATE INDEX channel_updates_scid ON channel_updates(short_channel_id)", &[]).await.unwrap();
-               tx.execute("CREATE INDEX channel_updates_direction ON channel_updates (short_channel_id, direction)", &[]).await.unwrap();
                tx.execute("ALTER TABLE channel_updates ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
                tx.execute("ALTER TABLE channel_updates ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
                tx.execute("ALTER TABLE channel_updates ALTER direction DROP DEFAULT", &[]).await.unwrap();
@@ -153,7 +209,7 @@ pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client)
                                        tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
                                });
                        }
-                       while let Some(_) = updates.next().await { }
+                       while let Some(_) = updates.next().await {}
                }
                tx.execute("ALTER TABLE channel_announcements ADD CONSTRAINT channel_announcements_short_channel_id_key UNIQUE (short_channel_id)", &[]).await.unwrap();
                tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
@@ -167,24 +223,162 @@ pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client)
                tx.execute("UPDATE config SET db_schema = 5 WHERE id = 1", &[]).await.unwrap();
                tx.commit().await.unwrap();
        }
+       if schema >= 1 && schema <= 5 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET DATA TYPE smallint", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_announcements DROP COLUMN block_height", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 6 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 6 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("ALTER TABLE channel_updates DROP COLUMN composite_index", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER timestamp SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER disable SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER cltv_expiry_delta SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER htlc_minimum_msat SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER fee_base_msat SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER fee_proportional_millionths SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER htlc_maximum_msat SET NOT NULL", &[]).await.unwrap();
+               tx.execute("ALTER TABLE channel_updates ALTER blob_signed SET NOT NULL", &[]).await.unwrap();
+               tx.execute("CREATE UNIQUE INDEX channel_updates_key ON channel_updates (short_channel_id, direction, timestamp)", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 7 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 7 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channels_seen", &[]).await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_scid", &[]).await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_direction", &[]).await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 8 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 8 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 9 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 9 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 10 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 10 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_id_with_scid_dir_blob", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 11 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
+       if schema >= 1 && schema <= 12 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_timestamp_desc", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
        if schema <= 1 || schema > SCHEMA_VERSION {
                panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
        }
+       // PostgreSQL (at least v13, but likely later versions as well) handles insert-only tables
+       // *very* poorly. After some number of inserts, it refuses to rely on indexes, assuming them to
+       // be possibly-stale, until a VACUUM happens. Thus, we set the vacuum factor really low here,
+       // pushing PostgreSQL to vacuum often.
+       // See https://www.cybertec-postgresql.com/en/postgresql-autovacuum-insert-only-tables/
+       let _ = client.execute("ALTER TABLE channel_updates SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
+       let _ = client.execute("ALTER TABLE channel_announcements SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
 }
 
-/// EDIT ME
 pub(crate) fn ln_peers() -> Vec<(PublicKey, SocketAddr)> {
-       vec![
-               // Bitfinex
-               // (hex_utils::to_compressed_pubkey("033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025").unwrap(), "34.65.85.39:9735".parse().unwrap()),
+       const WALLET_OF_SATOSHI: &str = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
+       let list = env::var("LN_PEERS").unwrap_or(WALLET_OF_SATOSHI.to_string());
+       let mut peers = Vec::new();
+       for (item, peer_info) in list.split(',').enumerate() {
+               // Ignore leading or trailing whitespace
+               let trimmed_peer_info = peer_info.trim();
+               // Ignore trailing or repeated commas
+               if !trimmed_peer_info.is_empty() {
+                       peers.push(resolve_peer_info(trimmed_peer_info).unwrap_or_else(|_| {
+                               panic!("Invalid peer info in LN_PEERS at item {}: {}", item, peer_info)
+                       }));
+               }
+       }
+       peers
+}
+
+fn resolve_peer_info(peer_info: &str) -> Result<(PublicKey, SocketAddr), &str> {
+       let mut peer_info = peer_info.splitn(2, '@');
+
+       let pubkey = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
+       let pubkey = Vec::from_hex(pubkey).map_err(|_| "Invalid node pubkey")?;
+       let pubkey = PublicKey::from_slice(&pubkey).map_err(|_| "Invalid node pubkey")?;
+
+       let socket_address = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
+       let socket_address = socket_address
+               .to_socket_addrs()
+               .map_err(|_| "Cannot resolve node address")?
+               .next()
+               .ok_or("Cannot resolve node address")?;
 
-               // Matt Corallo
-               // (hex_utils::to_compressed_pubkey("03db10aa09ff04d3568b0621750794063df401e6853c79a21a83e1a3f3b5bfb0c8").unwrap(), "69.59.18.80:9735".parse().unwrap())
+       Ok((pubkey, socket_address))
+}
+
#[cfg(test)]
mod tests {
	use super::*;
	use bitcoin::hashes::hex::ToHex;
	use std::str::FromStr;

	#[test]
	fn test_resolve_peer_info() {
		let wallet_of_satoshi = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
		let (pubkey, socket_address) = resolve_peer_info(wallet_of_satoshi).unwrap();
		assert_eq!(pubkey.serialize().to_hex(), "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226");
		assert_eq!(socket_address.to_string(), "170.75.163.209:9735");

		let ipv6 = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@[2001:db8::1]:80";
		let (pubkey, socket_address) = resolve_peer_info(ipv6).unwrap();
		assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
		assert_eq!(socket_address.to_string(), "[2001:db8::1]:80");

		let localhost = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@localhost:9735";
		let (pubkey, socket_address) = resolve_peer_info(localhost).unwrap();
		assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
		let socket_address = socket_address.to_string();
		assert!(socket_address == "127.0.0.1:9735" || socket_address == "[::1]:9735");
	}

	#[test]
	fn test_ln_peers() {
		// NOTE(review): `set_var` mutates process-global state; if more env-dependent
		// tests are added they should be serialized to avoid cross-test interference.
		// The value includes a repeated comma, a leading space, and a trailing comma,
		// all of which must be ignored.
		std::env::set_var("LN_PEERS", "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735,, 035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227@170.75.163.210:9735,");
		let peers = ln_peers();

		// Assert output is as expected
		assert_eq!(
			peers,
			vec![
				(
					PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226").unwrap(),
					SocketAddr::from_str("170.75.163.209:9735").unwrap()
				),
				(
					PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227").unwrap(),
					SocketAddr::from_str("170.75.163.210:9735").unwrap()
				)
			]
		);
	}
}