3 use std::convert::TryInto;
6 use std::net::{SocketAddr, ToSocketAddrs};
7 use std::time::Duration;
10 use bitcoin::hashes::hex::FromHex;
11 use bitcoin::secp256k1::PublicKey;
12 use futures::stream::{FuturesUnordered, StreamExt};
13 use lightning::ln::msgs::ChannelAnnouncement;
14 use lightning::util::ser::Readable;
15 use lightning_block_sync::http::HttpEndpoint;
16 use tokio_postgres::Config;
// Current database schema version. Bump whenever the table layout changes;
// `upgrade_db` migrates older databases up to this version step by step.
pub(crate) const SCHEMA_VERSION: i32 = 13;
pub(crate) const SNAPSHOT_CALCULATION_INTERVAL: u32 = 3600 * 24; // every 24 hours, in seconds
/// If the last update in either direction was more than six days ago, we send a reminder
/// That reminder may be either in the form of a channel announcement, or in the form of empty
/// updates in both directions.
pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
// NOTE(review): presumably gates whether the server connects to peers and ingests
// fresh gossip (vs. serving only what is already in the DB) — confirm at the call site.
pub(crate) const DOWNLOAD_NEW_GOSSIP: bool = true;
/// Returns the snapshot calculation interval in seconds, read from the
/// `RAPID_GOSSIP_SYNC_CALC_INTERVAL` environment variable.
///
/// Defaults to 86400 (24 hours) when the variable is unset.
///
/// # Panics
/// Panics if the variable is set but does not parse as a `u32`.
pub(crate) fn calculate_interval() -> u32 {
	let interval = env::var("RAPID_GOSSIP_SYNC_CALC_INTERVAL").unwrap_or("86400".to_string())
		.parse::<u32>()
		.expect("RAPID_GOSSIP_SYNC_CALC_INTERVAL env variable must be a u32.");
	interval
}
33 pub(crate) fn network() -> Network {
34 let network = env::var("RAPID_GOSSIP_SYNC_SERVER_NETWORK").unwrap_or("bitcoin".to_string()).to_lowercase();
35 match network.as_str() {
36 "mainnet" => Network::Bitcoin,
37 "bitcoin" => Network::Bitcoin,
38 "testnet" => Network::Testnet,
39 "signet" => Network::Signet,
40 "regtest" => Network::Regtest,
41 _ => panic!("Invalid network"),
45 pub(crate) fn log_level() -> lightning::util::logger::Level {
46 let level = env::var("RAPID_GOSSIP_SYNC_SERVER_LOG_LEVEL").unwrap_or("info".to_string()).to_lowercase();
47 match level.as_str() {
48 "gossip" => lightning::util::logger::Level::Gossip,
49 "trace" => lightning::util::logger::Level::Trace,
50 "debug" => lightning::util::logger::Level::Debug,
51 "info" => lightning::util::logger::Level::Info,
52 "warn" => lightning::util::logger::Level::Warn,
53 "error" => lightning::util::logger::Level::Error,
54 _ => panic!("Invalid log level"),
58 pub(crate) fn network_graph_cache_path() -> String {
59 format!("{}/network_graph.bin", cache_path())
/// Directory used for on-disk caches, read from the
/// `RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH` environment variable; defaults to `./res`.
///
/// NOTE(review): the configured path is lowercased, which changes the directory on
/// case-sensitive filesystems — confirm this is intentional.
pub(crate) fn cache_path() -> String {
	let path = env::var("RAPID_GOSSIP_SYNC_SERVER_CACHES_PATH").unwrap_or("./res".to_string()).to_lowercase();
	path
}
67 pub(crate) fn db_connection_config() -> Config {
68 let mut config = Config::new();
69 let host = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_HOST").unwrap_or("localhost".to_string());
70 let user = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_USER").unwrap_or("alice".to_string());
71 let db = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_NAME").unwrap_or("ln_graph_sync".to_string());
75 if let Ok(password) = env::var("RAPID_GOSSIP_SYNC_SERVER_DB_PASSWORD") {
76 config.password(&password);
81 pub(crate) fn bitcoin_rest_endpoint() -> HttpEndpoint {
82 let host = env::var("BITCOIN_REST_DOMAIN").unwrap_or("127.0.0.1".to_string());
83 let port = env::var("BITCOIN_REST_PORT")
84 .unwrap_or("8332".to_string())
86 .expect("BITCOIN_REST_PORT env variable must be a u16.");
87 let path = env::var("BITCOIN_REST_PATH").unwrap_or("/rest/".to_string());
88 HttpEndpoint::for_host(host).with_port(port).with_path(path)
/// DDL for the `config` table, which stores the current schema version
/// (see `SCHEMA_VERSION` and `upgrade_db`).
pub(crate) fn db_config_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS config (
		id SERIAL PRIMARY KEY,
		db_schema integer
	)"
}
/// DDL for the `channel_announcements` table: one row per channel, keyed by the
/// unique short channel id, holding the signed announcement bytes and the time
/// this server first saw it.
pub(crate) fn db_announcement_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_announcements (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL UNIQUE,
		announcement_signed BYTEA,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
/// DDL for the `channel_updates` table: one row per received channel update,
/// storing the decoded routing fields alongside the raw signed blob and the
/// time this server first saw it.
pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
	"CREATE TABLE IF NOT EXISTS channel_updates (
		id SERIAL PRIMARY KEY,
		short_channel_id bigint NOT NULL,
		timestamp bigint NOT NULL,
		channel_flags smallint NOT NULL,
		direction boolean NOT NULL,
		disable boolean NOT NULL,
		cltv_expiry_delta integer NOT NULL,
		htlc_minimum_msat bigint NOT NULL,
		fee_base_msat integer NOT NULL,
		fee_proportional_millionths integer NOT NULL,
		htlc_maximum_msat bigint NOT NULL,
		blob_signed BYTEA NOT NULL,
		seen timestamp NOT NULL DEFAULT NOW()
	)"
}
/// Index DDL for `channel_updates`, covering the access patterns used when
/// serving snapshots (lookups by scid/direction, ordered by seen/timestamp).
/// The unique (scid, direction, timestamp) index also deduplicates updates.
pub(crate) fn db_index_creation_query() -> &'static str {
	"
	CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
	CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
	CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
	CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
	CREATE INDEX IF NOT EXISTS channel_updates_timestamp_desc ON channel_updates(timestamp DESC);
	"
}
/// Incrementally migrates the gossip database from the on-disk `schema` version
/// toward the current `SCHEMA_VERSION`. Each version bump runs in its own
/// transaction and finishes by writing the new version into `config.db_schema`
/// and committing, so an interrupted upgrade resumes at the next unapplied step.
/// Finally tunes autovacuum for the two insert-only tables.
pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client) {
	// v1 -> v2: drop the per-row chain_hash columns (the chain is implied by the
	// configured network, so storing it per row is redundant).
	let tx = client.transaction().await.unwrap();
	tx.execute("ALTER TABLE channel_updates DROP COLUMN chain_hash", &[]).await.unwrap();
	tx.execute("ALTER TABLE channel_announcements DROP COLUMN chain_hash", &[]).await.unwrap();
	tx.execute("UPDATE config SET db_schema = 2 WHERE id = 1", &[]).await.unwrap();
	tx.commit().await.unwrap();
	// v2 -> v3: replace the string `composite_index` ("scid_hex:timestamp:direction")
	// with dedicated short_channel_id/direction columns, backfilled in batches.
	if schema == 1 || schema == 2 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN short_channel_id", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN direction", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ADD COLUMN direction boolean DEFAULT null", &[]).await.unwrap();
		// Backfill 50k rows at a time; the batch loop ends when no NULL rows remain.
		let rows = tx.query("SELECT id, composite_index FROM channel_updates WHERE short_channel_id IS NULL LIMIT 50000", &[]).await.unwrap();
		if rows.is_empty() { break; }
		// Issue the per-row UPDATEs concurrently and drain them below.
		let mut updates = FuturesUnordered::new();
		let id: i32 = row.get("id");
		let index: String = row.get("composite_index");
		updates.push(async move {
			// composite_index format: "<scid hex>:<timestamp>:<direction bit>".
			let mut index_iter = index.split(":");
			let scid_hex = index_iter.next().unwrap();
			// Skip the timestamp component; it already has its own column.
			index_iter.next().unwrap();
			let direction_str = index_iter.next().unwrap();
			assert!(direction_str == "1" || direction_str == "0");
			let direction = direction_str == "1";
			// hex_utils::to_vec: project-local hex decoder returning big-endian bytes.
			let scid_be_bytes = hex_utils::to_vec(scid_hex).unwrap();
			let scid = i64::from_be_bytes(scid_be_bytes.try_into().unwrap());
			assert!(scid > 0); // Will roll over in some 150 years or so
			tx_ref.execute("UPDATE channel_updates SET short_channel_id = $1, direction = $2 WHERE id = $3", &[&scid, &direction, &id]).await.unwrap();
		// Wait for every queued per-row update in this batch to complete.
		while let Some(_) = updates.next().await { }
		// Backfill done: lock in the new columns as required.
		tx.execute("ALTER TABLE channel_updates ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER direction DROP DEFAULT", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER direction SET NOT NULL", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 3 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	// v3 -> v4: give channel_announcements a short_channel_id column, backfilled
	// by deserializing each stored announcement, then made UNIQUE and NOT NULL.
	if schema >= 1 && schema <= 3 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_announcements DROP COLUMN short_channel_id", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements ADD COLUMN short_channel_id bigint DEFAULT null", &[]).await.unwrap();
		// Smaller batches (10k) here: each row costs a full announcement decode.
		let rows = tx.query("SELECT id, announcement_signed FROM channel_announcements WHERE short_channel_id IS NULL LIMIT 10000", &[]).await.unwrap();
		if rows.is_empty() { break; }
		let mut updates = FuturesUnordered::new();
		let id: i32 = row.get("id");
		let announcement: Vec<u8> = row.get("announcement_signed");
		updates.push(async move {
			// Extract the scid from the stored wire-format announcement.
			let scid = ChannelAnnouncement::read(&mut Cursor::new(announcement)).unwrap().contents.short_channel_id as i64;
			assert!(scid > 0); // Will roll over in some 150 years or so
			tx_ref.execute("UPDATE channel_announcements SET short_channel_id = $1 WHERE id = $2", &[&scid, &id]).await.unwrap();
		while let Some(_) = updates.next().await { }
		tx.execute("ALTER TABLE channel_announcements ADD CONSTRAINT channel_announcements_short_channel_id_key UNIQUE (short_channel_id)", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id DROP DEFAULT", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements ALTER short_channel_id SET NOT NULL", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 4 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	// v4 -> v5: fix composite_index to a fixed-width character type.
	if schema >= 1 && schema <= 4 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER composite_index SET DATA TYPE character(29)", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 5 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	// v5 -> v6: shrink channel_flags to smallint and drop the unused block_height.
	if schema >= 1 && schema <= 5 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET DATA TYPE smallint", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_announcements DROP COLUMN block_height", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 6 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	// v6 -> v7: drop composite_index for good, enforce NOT NULL on all decoded
	// fields, and add the unique (scid, direction, timestamp) key.
	if schema >= 1 && schema <= 6 {
		let tx = client.transaction().await.unwrap();
		tx.execute("ALTER TABLE channel_updates DROP COLUMN composite_index", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER timestamp SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER channel_flags SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER disable SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER cltv_expiry_delta SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER htlc_minimum_msat SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER fee_base_msat SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER fee_proportional_millionths SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER htlc_maximum_msat SET NOT NULL", &[]).await.unwrap();
		tx.execute("ALTER TABLE channel_updates ALTER blob_signed SET NOT NULL", &[]).await.unwrap();
		tx.execute("CREATE UNIQUE INDEX channel_updates_key ON channel_updates (short_channel_id, direction, timestamp)", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 7 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	// v7 -> v12: successive rounds of dropping indexes that were superseded by
	// the set created in db_index_creation_query().
	if schema >= 1 && schema <= 7 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channels_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_direction", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 8 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	if schema >= 1 && schema <= 8 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_seen", &[]).await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_seen", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 9 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	if schema >= 1 && schema <= 9 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_scid_dir_seen", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 10 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	if schema >= 1 && schema <= 10 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_id_with_scid_dir_blob", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	if schema >= 1 && schema <= 11 {
		let tx = client.transaction().await.unwrap();
		tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
		tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
		tx.commit().await.unwrap();
	// Reject versions we cannot handle.
	// NOTE(review): `schema <= 1` also panics for schema 1, which the branches
	// above appear to migrate — confirm whether fresh installs always start at
	// SCHEMA_VERSION and 1 is genuinely unsupported here.
	if schema <= 1 || schema > SCHEMA_VERSION {
		panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
	// PostgreSQL (at least v13, but likely later versions as well) handles insert-only tables
	// *very* poorly. After some number of inserts, it refuses to rely on indexes, assuming them to
	// be possibly-stale, until a VACUUM happens. Thus, we set the vacuum factor really low here,
	// pushing PostgreSQL to vacuum often.
	// See https://www.cybertec-postgresql.com/en/postgresql-autovacuum-insert-only-tables/
	// Best-effort (`let _ =`): older PostgreSQL versions may not support this option.
	let _ = client.execute("ALTER TABLE channel_updates SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
	let _ = client.execute("ALTER TABLE channel_announcements SET ( autovacuum_vacuum_insert_scale_factor = 0.005 );", &[]).await;
283 pub(crate) fn ln_peers() -> Vec<(PublicKey, SocketAddr)> {
284 const WALLET_OF_SATOSHI: &str = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
285 let list = env::var("LN_PEERS").unwrap_or(WALLET_OF_SATOSHI.to_string());
286 let mut peers = Vec::new();
287 for peer_info in list.split(',') {
288 peers.push(resolve_peer_info(peer_info).expect("Invalid peer info in LN_PEERS"));
293 fn resolve_peer_info(peer_info: &str) -> Result<(PublicKey, SocketAddr), &str> {
294 let mut peer_info = peer_info.splitn(2, '@');
296 let pubkey = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
297 let pubkey = Vec::from_hex(pubkey).map_err(|_| "Invalid node pubkey")?;
298 let pubkey = PublicKey::from_slice(&pubkey).map_err(|_| "Invalid node pubkey")?;
300 let socket_address = peer_info.next().ok_or("Invalid peer info. Should be formatted as: `pubkey@host:port`")?;
301 let socket_address = socket_address
303 .map_err(|_| "Cannot resolve node address")?
305 .ok_or("Cannot resolve node address")?;
307 Ok((pubkey, socket_address))
312 use super::resolve_peer_info;
313 use bitcoin::hashes::hex::ToHex;
/// Checks `resolve_peer_info` against an IPv4 literal, an IPv6 literal, and a
/// DNS name (localhost, which may resolve to either address family).
#[test]
fn test_resolve_peer_info() {
	let wallet_of_satoshi = "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735";
	let (pubkey, socket_address) = resolve_peer_info(wallet_of_satoshi).unwrap();
	assert_eq!(pubkey.serialize().to_hex(), "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226");
	assert_eq!(socket_address.to_string(), "170.75.163.209:9735");

	let ipv6 = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@[2001:db8::1]:80";
	let (pubkey, socket_address) = resolve_peer_info(ipv6).unwrap();
	assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
	assert_eq!(socket_address.to_string(), "[2001:db8::1]:80");

	let localhost = "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025@localhost:9735";
	let (pubkey, socket_address) = resolve_peer_info(localhost).unwrap();
	assert_eq!(pubkey.serialize().to_hex(), "033d8656219478701227199cbd6f670335c8d408a92ae88b962c49d4dc0e83e025");
	// localhost may resolve to IPv4 or IPv6 depending on the host configuration.
	let socket_address = socket_address.to_string();
	assert!(socket_address == "127.0.0.1:9735" || socket_address == "[::1]:9735");
}