5 use std::net::{SocketAddr, ToSocketAddrs};
6 use std::time::Duration;
9 use bitcoin::hashes::hex::FromHex;
10 use bitcoin::secp256k1::PublicKey;
11 use futures::stream::{FuturesUnordered, StreamExt};
12 use lightning::ln::msgs::ChannelAnnouncement;
13 use lightning::util::ser::Readable;
14 use lightning_block_sync::http::HttpEndpoint;
15 use tokio_postgres::Config;
// Version of the SQL schema this binary expects; `upgrade_db` migrates older
// databases up to this version one step at a time.
pub(crate) const SCHEMA_VERSION: i32 = 13;
// Snapshot symlinks are generated on this time grid; `snapshot_generation_interval`
// asserts the configured interval is a positive multiple of this value.
pub(crate) const SYMLINK_GRANULARITY_INTERVAL: u32 = 3600 * 3; // three hours
// NOTE(review): presumably the maximum lookback window a snapshot covers — confirm
// against the snapshot-generation module.
pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
// generate symlinks based on a 3-hour-granularity
/// If the last update in either direction was more than six days ago, we send a reminder
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
/// The number of successful peer connections to await prior to continuing to gossip storage.
/// The application will still work if the number of specified peers is lower, as long as there is
/// at least one successful peer connection, but it may result in long startup times.
pub(crate) const CONNECTED_PEER_ASSERTION_LIMIT: usize = 5;
// NOTE(review): presumably gates whether the server connects to peers and downloads
// fresh gossip (vs. serving stored data only) — confirm at the usage site.
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
31 pub(crate) fn snapshot_generation_interval() -> u32 {
32 let interval = env::var("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL").unwrap_or(SYMLINK_GRANULARITY_INTERVAL.to_string())
34 .expect("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL env variable must be a u32.");
35 assert!(interval > 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be positive");
36 assert_eq!(interval % SYMLINK_GRANULARITY_INTERVAL, 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be a multiple of {} (seconds)", SYMLINK_GRANULARITY_INTERVAL);
40 pub(crate) fn network() -> Network {
41 let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
42 match network.as_str() {
43 "mainnet" => Network::Bitcoin,
44 "bitcoin" => Network::Bitcoin,
45 "testnet" => Network::Testnet,
46 "signet" => Network::Signet,
47 "regtest" => Network::Regtest,
48 _ => panic!("Invalid network"),
52 pub(crate) fn log_level() -> lightning::util::logger::Level {
53 let level = env::var("RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL").unwrap_or("info".to_string()).to_lowercase();
54 match level.as_str() {
55 "gossip" => lightning::util::logger::Level::Gossip,
56 "trace" => lightning::util::logger::Level::Trace,
57 "debug" => lightning::util::logger::Level::Debug,
58 "info" => lightning::util::logger::Level::Info,
59 "warn" => lightning::util::logger::Level::Warn,
60 "error" => lightning::util::logger::Level::Error,
61 _ => panic!("Invalid log level"),
65 pub(crate) fn network_graph_cache_path() -> String {
66 format!("{}/network_graph.bin", cache_path())
/// Returns the directory used for on-disk caches.
///
/// Read from the `RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH` environment variable
/// (default `"./res"`). Note the value is lowercased, matching the original
/// behavior, so mixed-case paths are normalized.
pub(crate) fn cache_path() -> String {
	let path = env::var("RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH").unwrap_or("./res".to_string()).to_lowercase();
	path
}
74 pub(crate) fn db_connection_config() -> Config {
75 let mut config = Config::new();
76 let env_name_prefix = if cfg!(test) {
77 "RAPID_GOSSIP_TEST_DB"
79 "RAPID_GOSSIP_SYNC_SERVER_DB"
82 let host = env::var(format!("{}{}", env_name_prefix, "_HOST")).unwrap_or("localhost".to_string());
83 let user = env::var(format!("{}{}", env_name_prefix, "_USER")).unwrap_or("alice".to_string());
84 let db = env::var(format!("{}{}", env_name_prefix, "_NAME")).unwrap_or("ln_graph_sync".to_string());
88 if let Ok(password) = env::var(format!("{}{}", env_name_prefix, "_PASSWORD")) {
89 config.password(&password);
94 pub(crate) fn bitcoin_rest_endpoint() -> HttpEndpoint {
95 let host = env::var("BITCOIN_REST_DOMAIN").unwrap_or("127.0.0.1".to_string());
96 let port = env::var("BITCOIN_REST_PORT")
97 .unwrap_or("8332".to_string())
99 .expect("BITCOIN_REST_PORT env variable must be a u16.");
100 let path = env::var("BITCOIN_REST_PATH").unwrap_or("/rest/".to_string());
101 HttpEndpoint::for_host(host).with_port(port).with_path(path)
/// SQL to create the singleton `config` table, which stores the current schema
/// version consumed/updated by `upgrade_db`.
pub(crate) fn db_config_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS config (
		id SERIAL PRIMARY KEY,
		db_schema integer
	)"
}
/// SQL to create the `channel_announcements` table. Each row stores one signed
/// channel announcement, keyed uniquely by its short channel id, plus the time it
/// was first seen (used for snapshot scoping).
pub(crate) fn db_announcement_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_announcements (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL UNIQUE,
		announcement_signed BYTEA,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
/// SQL to create the `channel_updates` table. Each row stores one signed channel
/// update (`blob_signed`) together with the fields needed to filter and dedup
/// updates without deserializing the blob (scid, direction, timestamp, fee and
/// HTLC policy columns), plus the time it was first seen.
pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_updates (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL,
		timestamp bigint NOT NULL,
		channel_flags smallint NOT NULL,
		direction boolean NOT NULL,
		disable boolean NOT NULL,
		cltv_expiry_delta integer NOT NULL,
		htlc_minimum_msat bigint NOT NULL,
		fee_base_msat integer NOT NULL,
		fee_proportional_millionths integer NOT NULL,
		htlc_maximum_msat bigint NOT NULL,
		blob_signed BYTEA NOT NULL,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
/// SQL creating all indexes used by the serving/snapshot queries, including the
/// unique `(short_channel_id, direction, timestamp)` key that deduplicates updates.
/// All statements are idempotent (`IF NOT EXISTS`) so this can run on every start.
pub(crate) fn db_index_creation_query() -> &'static str {
	"
	CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
	CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
	CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_asc_timestamp_desc ON channel_updates(short_channel_id ASC, timestamp DESC);
	"
}
/// Incrementally migrates an existing database from `schema` (the version read from
/// the `config` table) up to [`SCHEMA_VERSION`]. Each migration step runs in its own
/// transaction and bumps `config.db_schema`, so an interrupted upgrade resumes at
/// the next start.
///
/// NOTE(review): several structural lines (some `if schema == …` guards, `loop`/`for`
/// headers, `tx_ref` bindings, and closing braces) are not visible in this chunk;
/// the comments below annotate the visible statements only.
pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client) {
// v1 -> v2: the chain hash is implied by the configured network, so drop the columns.
// (The guard selecting this step is presumably just above — not visible here.)
let tx = client.transaction().await.unwrap();
tx.execute("ALTER TABLE channel_updates DROP COLUMN chain_hash", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_announcements DROP COLUMN chain_hash", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 2 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// v2 -> v3: replace the string `composite_index` key with dedicated
// `short_channel_id` and `direction` columns, backfilled from the old index value.
if schema == 1 || schema == 2 {
let tx = client.transaction().await.unwrap();
tx.execute("ALTER TABLE channel_updates DROP COLUMN short_channel_id", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates DROP COLUMN direction", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ADD COLUMN direction boolean DEFAULT null", &[]).await.unwrap();
// Backfill in batches of up to 50k rows until none remain NULL.
// (The `break` below implies an enclosing `loop` whose header is not visible here.)
let rows = tx.query("SELECT id, composite_index FROM channel_updates WHERE short_channel_id IS NULL LIMIT 50000", &[]).await.unwrap();
if rows.is_empty() { break; }
// Run the per-row UPDATEs concurrently on the same transaction.
let mut updates = FuturesUnordered::new();
let id: i32 = row.get("id");
let index: String = row.get("composite_index");
updates.push(async move {
// composite_index appears to be "<scid-hex>:<middle>:<direction>"; split out the parts.
let mut index_iter = index.split(":");
let scid_hex = index_iter.next().unwrap();
// The middle component is intentionally skipped — only the first and third are used.
index_iter.next().unwrap();
let direction_str = index_iter.next().unwrap();
assert!(direction_str == "1" || direction_str == "0");
let direction = direction_str == "1";
// SCID is stored as big-endian hex; decode into a signed 64-bit value for the DB column.
let scid_be_bytes = hex_utils::to_vec(scid_hex).unwrap();
let scid = i64::from_be_bytes(scid_be_bytes.try_into().unwrap());
assert!(scid > 0); // Will roll over in some 150 years or so
// NOTE(review): `tx_ref` is presumably a shared borrow of `tx` declared above (not visible).
tx_ref.execute("UPDATE channel_updates SET short_channel_id = $1, direction = $2 WHERE id = $3", &[&scid, &direction, &id]).await.unwrap();
// Drain all pending row updates before fetching the next batch.
while let Some(_) = updates.next().await {}
// Once fully backfilled, tighten the new columns to NOT NULL without defaults.
tx.execute("ALTER TABLE channel_updates ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER direction DROP DEFAULT", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER direction SET NOT NULL", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 3 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// v3 -> v4: same backfill treatment for channel_announcements, deriving the SCID by
// deserializing the stored signed announcement.
if schema >= 1 && schema <= 3 {
let tx = client.transaction().await.unwrap();
tx.execute("ALTER TABLE channel_announcements DROP COLUMN short_channel_id", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_announcements ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
// Batches of up to 10k rows until none remain (enclosing loop not visible here).
let rows = tx.query("SELECT id, announcement_signed FROM channel_announcements WHERE short_channel_id IS NULL LIMIT 10000", &[]).await.unwrap();
if rows.is_empty() { break; }
let mut updates = FuturesUnordered::new();
let id: i32 = row.get("id");
let announcement: Vec<u8> = row.get("announcement_signed");
updates.push(async move {
// Deserialize the announcement and pull the SCID out of its unsigned contents.
let scid = ChannelAnnouncement::read(&mut Cursor::new(announcement)).unwrap().contents.short_channel_id as i64;
assert!(scid > 0); // Will roll over in some 150 years or so
tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
while let Some(_) = updates.next().await {}
// Enforce uniqueness and NOT NULL once the backfill is complete.
tx.execute("ALTER TABLE channel_announcements ADD CONSTRAINT channel_announcements_short_channel_id_key UNIQUE (short_channel_id)", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 4 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// v4 -> v5: fix the width of the (still present) composite_index column.
if schema >= 1 && schema <= 4 {
let tx = client.transaction().await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER composite_index SET DATA TYPE character(29)", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 5 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// v5 -> v6: shrink channel_flags and drop the redundant block_height column
// (derivable from the SCID).
if schema >= 1 && schema <= 5 {
let tx = client.transaction().await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET DATA TYPE smallint", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_announcements DROP COLUMN block_height", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 6 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// v6 -> v7: retire composite_index entirely, make all payload columns NOT NULL,
// and add the unique (scid, direction, timestamp) dedup key.
if schema >= 1 && schema <= 6 {
let tx = client.transaction().await.unwrap();
tx.execute("ALTER TABLE channel_updates DROP COLUMN composite_index", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER timestamp SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER disable SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER cltv_expiry_delta SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER htlc_minimum_msat SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER fee_base_msat SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER fee_proportional_millionths SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER htlc_maximum_msat SET NOT NULL", &[]).await.unwrap();
tx.execute("ALTER TABLE channel_updates ALTER blob_signed SET NOT NULL", &[]).await.unwrap();
tx.execute("CREATE UNIQUE INDEX channel_updates_key ON channel_updates (short_channel_id, direction, timestamp)", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 7 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// v7 -> v12: the remaining steps only drop indexes that were superseded by the
// set created in `db_index_creation_query` (idempotent via IF EXISTS).
if schema >= 1 && schema <= 7 {
let tx = client.transaction().await.unwrap();
tx.execute("DROP INDEX IF EXISTS channels_seen", &[]).await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_scid", &[]).await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_direction", &[]).await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 8 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
if schema >= 1 && schema <= 8 {
let tx = client.transaction().await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 9 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
if schema >= 1 && schema <= 9 {
let tx = client.transaction().await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 10 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
if schema >= 1 && schema <= 10 {
let tx = client.transaction().await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_id_with_scid_dir_blob", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
if schema >= 1 && schema <= 11 {
let tx = client.transaction().await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
if schema >= 1 && schema <= 12 {
let tx = client.transaction().await.unwrap();
tx.execute("DROP INDEX IF EXISTS channel_updates_timestamp_desc", &[]).await.unwrap();
tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
tx.commit().await.unwrap();
// Reject schema versions the steps above cannot handle.
// NOTE(review): `schema <= 1` also panics for schema 1, which the migrations above
// appear to support — confirm whether `schema < 1` was intended.
if schema <= 1 || schema > SCHEMA_VERSION {
panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
// PostgreSQL (at least v13, but likely later versions as well) handles insert-only tables
// *very* poorly. After some number of inserts, it refuses to rely on indexes, assuming them to
// be possibly-stale, until a VACUUM happens. Thus, we set the vacuum factor really low here,
// pushing PostgreSQL to vacuum often.
// See https://www.cybertec-postgresql.com/en/postgresql-autovacuum-insert-only-tables/
// Best-effort (`let _ =`): these are tuning knobs, not correctness requirements.
let _ = client.execute("ALTER TABLE channel_updates SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
let _ = client.execute("ALTER TABLE channel_announcements SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
302 pub(crate) fn ln_peers() -> Vec<(PublicKey, SocketAddr)> {
303 const WALLET_OF_SATOSHI: &str = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
304 let list = env::var("LN_PEERS").unwrap_or(WALLET_OF_SATOSHI.to_string());
305 let mut peers = Vec::new();
306 for (item, peer_info) in list.split(',').enumerate() {
307 // Ignore leading or trailing whitespace
308 let trimmed_peer_info = peer_info.trim();
309 // Ignore trailing or repeated commas
310 if !trimmed_peer_info.is_empty() {
311 peers.push(resolve_peer_info(trimmed_peer_info).unwrap_or_else(|_| {
312 panic!("Invalid peer info in LN_PEERS at item {}: {}", item, peer_info)
319 fn resolve_peer_info(peer_info: &str) -> Result<(PublicKey, SocketAddr), &str> {
320 let mut peer_info = peer_info.splitn(2, '@');
322 let pubkey = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
323 let pubkey = Vec::from_hex(pubkey).map_err(|_| "Invalid node pubkey")?;
324 let pubkey = PublicKey::from_slice(&pubkey).map_err(|_| "Invalid node pubkey")?;
326 let socket_address = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
327 let socket_address = socket_address
329 .map_err(|_| "Cannot resolve node address")?
331 .ok_or("Cannot resolve node address")?;
333 Ok((pubkey, socket_address))
339 use hex_conservative::DisplayHex;
340 use std::str::FromStr;
// Exercises resolve_peer_info with an IPv4 literal, an IPv6 literal, and a
// DNS hostname. NOTE(review): the `assert_eq!(` wrappers around the pubkey
// comparisons are not visible in this chunk — only their argument lines are.
fn test_resolve_peer_info() {
// IPv4 literal: pubkey and address should round-trip unchanged.
let wallet_of_satoshi = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
let (pubkey, socket_address) = resolve_peer_info(wallet_of_satoshi).unwrap();
pubkey.serialize().to_lower_hex_string(),
"035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226"
assert_eq!(socket_address.to_string(), "170.75.163.209:9735");
// IPv6 literal in bracket notation.
let ipv6 = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@[2001:db8::1]:80";
let (pubkey, socket_address) = resolve_peer_info(ipv6).unwrap();
pubkey.serialize().to_lower_hex_string(),
"033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025"
assert_eq!(socket_address.to_string(), "[2001:db8::1]:80");
// Hostname requiring DNS resolution; may resolve to either loopback family.
let localhost = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@localhost:9735";
let (pubkey, socket_address) = resolve_peer_info(localhost).unwrap();
pubkey.serialize().to_lower_hex_string(),
"033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025"
let socket_address = socket_address.to_string();
assert!(socket_address == "127.0.0.1:9735" || socket_address == "[::1]:9735");
// Body of the ln_peers test (its `fn` header is not visible in this chunk):
// verifies that whitespace, repeated commas, and a trailing comma in LN_PEERS are
// tolerated and that both entries parse to the expected (pubkey, address) pairs.
// NOTE(review): the surrounding `assert_eq!(peers, vec![ ... ])` scaffolding
// appears truncated here — only the expected-tuple argument lines are visible.
// Set the environment variable, including a repeated comma, leading space, and trailing comma.
std::env::set_var("LN_PEERS", "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735,, 035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227@170.75.163.210:9735,");
let peers = ln_peers();

// Assert output is as expected
// First expected peer: the default Wallet of Satoshi pubkey at .209.
PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226").unwrap(),
SocketAddr::from_str("170.75.163.209:9735").unwrap()
// Second expected peer: the variant pubkey (…227) at .210.
PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227").unwrap(),
SocketAddr::from_str("170.75.163.210:9735").unwrap()