use crate::hex_utils;

use std::convert::TryInto;
use std::env;
use std::io::Cursor;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;

use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::PublicKey;
use futures::stream::{FuturesUnordered, StreamExt};
use lightning::ln::msgs::ChannelAnnouncement;
use lightning::util::ser::Readable;
use lightning_block_sync::http::HttpEndpoint;
use tokio_postgres::Config;
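/// The current database schema version. `upgrade_db` below migrates older databases up to this
/// version, and the final migration block must leave `config.db_schema` equal to this value.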
pub(crate) const SCHEMA_VERSION: i32 = 13;
// generate symlinks based on a 3-hour granularity
pub(crate) const SYMLINK_GRANULARITY_INTERVAL: u32 = 3600 * 3; // three hours
pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
/// If the last update in either direction was more than six days ago, we send a reminder.
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
/// The number of successful peer connections to await before proceeding to gossip storage.
/// The application will still work if the number of specified peers is lower, as long as there is
/// at least one successful peer connection, but it may result in long startup times.
pub(crate) const CONNECTED_PEER_ASSERTION_LIMIT: usize = 5;
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
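/// Interval (in seconds) at which new snapshots are generated, read from the
/// `RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL` environment variable and defaulting to
/// [`SYMLINK_GRANULARITY_INTERVAL`]. The value must be a positive multiple of the granularity,
/// e.g. `21600` (six hours) with the default three-hour granularity.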
pub(crate) fn snapshot_generation_interval() -> u32 {
	let interval = env::var("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL").unwrap_or(SYMLINK_GRANULARITY_INTERVAL.to_string())
		.parse::<u32>()
		.expect("RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL env variable must be a u32.");
	assert!(interval > 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be positive");
	assert_eq!(interval % SYMLINK_GRANULARITY_INTERVAL, 0, "RAPID_GOSSIP_SYNC_SERVER_SNAPSHOT_INTERVAL must be a multiple of {} (seconds)", SYMLINK_GRANULARITY_INTERVAL);
	interval
}
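/// Network to run the server against, read from `RAPID_GOSSIP_SYNC_SERVER_NETWORK`
/// (defaults to `bitcoin`; `mainnet` is accepted as an alias).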
pub(crate) fn network() -> Network {
	let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
	match network.as_str() {
		"mainnet" => Network::Bitcoin,
		"bitcoin" => Network::Bitcoin,
		"testnet" => Network::Testnet,
		"signet" => Network::Signet,
		"regtest" => Network::Regtest,
		_ => panic!("Invalid network"),
	}
}
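/// Log level for the console logger, read from `RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL`
/// (defaults to `info`).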
pub(crate) fn log_level() -> lightning::util::logger::Level {
	let level = env::var("RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL").unwrap_or("info".to_string()).to_lowercase();
	match level.as_str() {
		"gossip" => lightning::util::logger::Level::Gossip,
		"trace" => lightning::util::logger::Level::Trace,
		"debug" => lightning::util::logger::Level::Debug,
		"info" => lightning::util::logger::Level::Info,
		"warn" => lightning::util::logger::Level::Warn,
		"error" => lightning::util::logger::Level::Error,
		_ => panic!("Invalid log level"),
	}
}
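/// Path of the serialized network graph cache within the caches directory.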
pub(crate) fn network_graph_cache_path() -> String {
	format!("{}/network_graph.bin", cache_path())
}
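/// Directory used for on-disk caches, read from `RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH`
/// (defaults to `./res`).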
pub(crate) fn cache_path() -> String {
	let path = env::var("RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH").unwrap_or("./res".to_string()).to_lowercase();
	path
}
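/// Builds the PostgreSQL connection configuration from environment variables. Under `cfg(test)`
/// the `RAPID_GOSSIP_TEST_DB` prefix is used so tests run against a separate database. A minimal
/// example for a non-test run (the values shown are the fallbacks used below):
///
/// ```text
/// RAPID_GOSSIP_SYNC_SERVER_DB_HOST=localhost
/// RAPID_GOSSIP_SYNC_SERVER_DB_USER=alice
/// RAPID_GOSSIP_SYNC_SERVER_DB_NAME=ln_graph_sync
/// RAPID_GOSSIP_SYNC_SERVER_DB_PASSWORD=<optional, only applied if set>
/// ```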
pub(crate) fn db_connection_config() -> Config {
	let mut config = Config::new();
	let env_name_prefix = if cfg!(test) {
		"RAPID_GOSSIP_TEST_DB"
	} else {
		"RAPID_GOSSIP_SYNC_SERVER_DB"
	};

	let host = env::var(format!("{}{}", env_name_prefix, "_HOST")).unwrap_or("localhost".to_string());
	let user = env::var(format!("{}{}", env_name_prefix, "_USER")).unwrap_or("alice".to_string());
	let db = env::var(format!("{}{}", env_name_prefix, "_NAME")).unwrap_or("ln_graph_sync".to_string());
	config.host(&host);
	config.user(&user);
	config.dbname(&db);
	if let Ok(password) = env::var(format!("{}{}", env_name_prefix, "_PASSWORD")) {
		config.password(&password);
	}
	config
}
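/// Endpoint of the bitcoind REST interface used for chain data lookups, configured via
/// `BITCOIN_REST_DOMAIN`, `BITCOIN_REST_PORT`, and `BITCOIN_REST_PATH`. With the defaults below
/// this resolves to `127.0.0.1:8332/rest/`; note that bitcoind must be started with `-rest` for
/// the interface to be available.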
pub(crate) fn bitcoin_rest_endpoint() -> HttpEndpoint {
	let host = env::var("BITCOIN_REST_DOMAIN").unwrap_or("127.0.0.1".to_string());
	let port = env::var("BITCOIN_REST_PORT")
		.unwrap_or("8332".to_string())
		.parse::<u16>()
		.expect("BITCOIN_REST_PORT env variable must be a u16.");
	let path = env::var("BITCOIN_REST_PATH").unwrap_or("/rest/".to_string());
	HttpEndpoint::for_host(host).with_port(port).with_path(path)
}
pub(crate) fn db_config_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS config (
		id SERIAL PRIMARY KEY,
		db_schema integer
	)"
}
pub(crate) fn db_announcement_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_announcements (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL UNIQUE,
		announcement_signed BYTEA,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_updates (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL,
		timestamp bigint NOT NULL,
		channel_flags smallint NOT NULL,
		direction boolean NOT NULL,
		disable boolean NOT NULL,
		cltv_expiry_delta integer NOT NULL,
		htlc_minimum_msat bigint NOT NULL,
		fee_base_msat integer NOT NULL,
		fee_proportional_millionths integer NOT NULL,
		htlc_maximum_msat bigint NOT NULL,
		blob_signed BYTEA NOT NULL,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
pub(crate) fn db_index_creation_query() -> &'static str {
	"
	CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
	CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
	CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_asc_timestamp_desc ON channel_updates(short_channel_id ASC, timestamp DESC);
	"
}
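/// Applies in-place schema migrations. Each block below runs inside its own transaction and bumps
/// `config.db_schema` on commit, so an interrupted upgrade can simply be re-run and will resume
/// from the last committed schema version.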
pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client) {
	if schema == 1 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN chain_hash", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements DROP COLUMN chain_hash", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 2 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema == 1 || schema == 2 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN short_channel_id", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN direction", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ADD COLUMN direction boolean DEFAULT null", &[]).await.unwrap();
		loop {
			// Backfill the new columns in batches by parsing the legacy composite_index values.
			let rows = tx.query("SELECT id, composite_index FROM channel_updates WHERE short_channel_id IS NULL LIMIT 50000", &[]).await.unwrap();
			if rows.is_empty() { break; }
			let mut updates = FuturesUnordered::new();
			for row in rows {
				let id: i32 = row.get("id");
				let index: String = row.get("composite_index");
				let tx_ref = &tx;
				updates.push(async move {
					// composite_index is colon-separated: the first component is the hex-encoded
					// SCID and the third encodes the direction; the middle component is skipped.
					let mut index_iter = index.split(":");
					let scid_hex = index_iter.next().unwrap();
					index_iter.next().unwrap();
					let direction_str = index_iter.next().unwrap();
					assert!(direction_str == "1" || direction_str == "0");
					let direction = direction_str == "1";
					let scid_be_bytes = hex_utils::to_vec(scid_hex).unwrap();
					let scid = i64::from_be_bytes(scid_be_bytes.try_into().unwrap());
					assert!(scid > 0); // Will roll over in some 150 years or so
					tx_ref.execute("UPDATE channel_updates SET short_channel_id = $1, direction = $2 WHERE id = $3", &[&scid, &direction, &id]).await.unwrap();
				});
			}
			while let Some(_) = updates.next().await {}
		}
		tx.execute("ALTER TABLE channel_updates ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER direction DROP DEFAULT", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER direction SET NOT NULL", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 3 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 3 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_announcements DROP COLUMN short_channel_id", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
		loop {
			let rows = tx.query("SELECT id, announcement_signed FROM channel_announcements WHERE short_channel_id IS NULL LIMIT 10000", &[]).await.unwrap();
			if rows.is_empty() { break; }
			let mut updates = FuturesUnordered::new();
			for row in rows {
				let id: i32 = row.get("id");
				let announcement: Vec<u8> = row.get("announcement_signed");
				let tx_ref = &tx;
				updates.push(async move {
					// Re-derive the SCID by deserializing the stored announcement.
					let scid = ChannelAnnouncement::read(&mut Cursor::new(announcement)).unwrap().contents.short_channel_id as i64;
					assert!(scid > 0); // Will roll over in some 150 years or so
					tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
				});
			}
			while let Some(_) = updates.next().await {}
		}
		tx.execute("ALTER TABLE channel_announcements ADD CONSTRAINT channel_announcements_short_channel_id_key UNIQUE (short_channel_id)", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 4 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 4 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER composite_index SET DATA TYPE character(29)", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 5 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 5 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET DATA TYPE smallint", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements DROP COLUMN block_height", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 6 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 6 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN composite_index", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER timestamp SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER disable SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER cltv_expiry_delta SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER htlc_minimum_msat SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER fee_base_msat SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER fee_proportional_millionths SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER htlc_maximum_msat SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER blob_signed SET NOT NULL", &[]).await.unwrap();
		tx.execute("CREATE UNIQUE INDEX channel_updates_key ON channel_updates (short_channel_id, direction, timestamp)", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 7 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 7 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channels_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_direction", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 8 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 8 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 9 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 9 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 10 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 10 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_id_with_scid_dir_blob", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 11 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema >= 1 && schema <= 12 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_timestamp_desc", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	}
	if schema <= 1 || schema > SCHEMA_VERSION {
		panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
	}
	// PostgreSQL (at least v13, but likely later versions as well) handles insert-only tables
	// *very* poorly. After some number of inserts, it refuses to rely on indexes, assuming them to
	// be possibly-stale, until a VACUUM happens. Thus, we set the vacuum factor really low here,
	// pushing PostgreSQL to vacuum often.
	// See https://www.cybertec-postgresql.com/en/postgresql-autovacuum-insert-only-tables/
	let _ = client.execute("ALTER TABLE channel_updates SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
	let _ = client.execute("ALTER TABLE channel_announcements SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
}
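/// Peers to fetch gossip from, read from the comma-separated `LN_PEERS` environment variable,
/// where each entry is formatted as `pubkey@host:port`. Defaults to a single well-known node
/// (Wallet of Satoshi) when the variable is unset.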
pub(crate) fn ln_peers() -> Vec<(PublicKey, SocketAddr)> {
	const WALLET_OF_SATOSHI: &str = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
	let list = env::var("LN_PEERS").unwrap_or(WALLET_OF_SATOSHI.to_string());
	let mut peers = Vec::new();

	for (item, peer_info) in list.split(',').enumerate() {
		// Ignore leading or trailing whitespace
		let trimmed_peer_info = peer_info.trim();
		// Ignore trailing or repeated commas
		if !trimmed_peer_info.is_empty() {
			peers.push(resolve_peer_info(trimmed_peer_info).unwrap_or_else(|_| {
				panic!("Invalid peer info in LN_PEERS at item {}: {}", item, peer_info)
			}));
		}
	}

	peers
}
fn resolve_peer_info(peer_info: &str) -> Result<(PublicKey, SocketAddr), &str> {
	let mut peer_info = peer_info.splitn(2, '@');

	let pubkey = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
	let pubkey = Vec::from_hex(pubkey).map_err(|_| "Invalid node pubkey")?;
	let pubkey = PublicKey::from_slice(&pubkey).map_err(|_| "Invalid node pubkey")?;

	let socket_address = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
	let socket_address = socket_address
		.to_socket_addrs()
		.map_err(|_| "Cannot resolve node address")?
		.next()
		.ok_or("Cannot resolve node address")?;

	Ok((pubkey, socket_address))
}
#[cfg(test)]
mod tests {
	use super::*;

	use bitcoin::hashes::hex::ToHex;
	use std::str::FromStr;

	#[test]
	fn test_resolve_peer_info() {
		let wallet_of_satoshi = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
		let (pubkey, socket_address) = resolve_peer_info(wallet_of_satoshi).unwrap();
		assert_eq!(pubkey.serialize().to_hex(), "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226");
		assert_eq!(socket_address.to_string(), "170.75.163.209:9735");

		let ipv6 = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@[2001:db8::1]:80";
		let (pubkey, socket_address) = resolve_peer_info(ipv6).unwrap();
		assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
		assert_eq!(socket_address.to_string(), "[2001:db8::1]:80");

		let localhost = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@localhost:9735";
		let (pubkey, socket_address) = resolve_peer_info(localhost).unwrap();
		assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
		let socket_address = socket_address.to_string();
		assert!(socket_address == "127.0.0.1:9735" || socket_address == "[::1]:9735");
	}
	#[test]
	fn test_ln_peers() {
		// Set the environment variable, including a repeated comma, leading space, and trailing comma.
		std::env::set_var("LN_PEERS", "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735,, 035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227@170.75.163.210:9735,");
		let peers = ln_peers();

		// Assert output is as expected
		assert_eq!(peers, vec![
			(PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226").unwrap(),
				SocketAddr::from_str("170.75.163.209:9735").unwrap()),
			(PublicKey::from_str("035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227").unwrap(),
				SocketAddr::from_str("170.75.163.210:9735").unwrap()),
		]);
	}
}