use std::fs::OpenOptions;
use std::io::{BufWriter, Write};
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};

use lightning::log_info;
use lightning::routing::gossip::NetworkGraph;
use lightning::util::logger::Logger;
use lightning::util::ser::Writeable;
use tokio::runtime::Runtime;
use tokio::sync::{mpsc, Mutex, Semaphore};

use crate::config;
use crate::types::GossipMessage;
-pub(crate) struct GossipPersister {
- pub(crate) gossip_persistence_sender: mpsc::Sender<DetectedGossipMessage>,
- gossip_persistence_receiver: mpsc::Receiver<DetectedGossipMessage>,
- server_sync_completion_sender: mpsc::Sender<()>,
- network_graph: Arc<NetworkGraph<Arc<TestLogger>>>,
+const POSTGRES_INSERT_TIMEOUT: Duration = Duration::from_secs(15);
+const INSERT_PARALELLISM: usize = 16;
+
+pub(crate) struct GossipPersister<L: Deref> where L::Target: Logger {
+ gossip_persistence_receiver: mpsc::Receiver<GossipMessage>,
+ network_graph: Arc<NetworkGraph<L>>,
+ tokio_runtime: Runtime,
+ logger: L
}
-impl GossipPersister {
- pub fn new(server_sync_completion_sender: mpsc::Sender<()>, network_graph: Arc<NetworkGraph<Arc<TestLogger>>>) -> Self {
+impl<L: Deref> GossipPersister<L> where L::Target: Logger {
+ pub fn new(network_graph: Arc<NetworkGraph<L>>, logger: L) -> (Self, mpsc::Sender<GossipMessage>) {
let (gossip_persistence_sender, gossip_persistence_receiver) =
- mpsc::channel::<DetectedGossipMessage>(10000);
- GossipPersister {
- gossip_persistence_sender,
+ mpsc::channel::<GossipMessage>(100);
+ let runtime = Runtime::new().unwrap();
+ (GossipPersister {
gossip_persistence_receiver,
- server_sync_completion_sender,
- network_graph
- }
+ network_graph,
+ tokio_runtime: runtime,
+ logger
+ }, gossip_persistence_sender)
}
pub(crate) async fn persist_gossip(&mut self) {
- let connection_config = config::db_connection_config();
- let (client, connection) =
- connection_config.connect(NoTls).await.unwrap();
-
- tokio::spawn(async move {
- if let Err(e) = connection.await {
- panic!("connection error: {}", e);
- }
- });
+ { // initialize the database
+ // this client instance is only used once
+ let mut client = crate::connect_to_db().await;
- {
- // initialize the database
let initialization = client
.execute(config::db_config_table_creation_query(), &[])
.await;
panic!("db init error: {}", initialization_error);
}
+ let cur_schema = client.query("SELECT db_schema FROM config WHERE id = $1", &[&1]).await.unwrap();
+ if !cur_schema.is_empty() {
+ config::upgrade_db(cur_schema[0].get(0), &mut client).await;
+ }
+
+ let preparation = client.execute("set time zone UTC", &[]).await;
+ if let Err(preparation_error) = preparation {
+ panic!("db preparation error: {}", preparation_error);
+ }
+
let initialization = client
.execute(
// TODO: figure out a way to fix the id value without Postgres complaining about
}
}
- // print log statement every 10,000 messages
- let mut persistence_log_threshold = 10000;
+ // print log statement every minute
+ let mut latest_persistence_log = Instant::now() - Duration::from_secs(60);
let mut i = 0u32;
- let mut server_sync_completion_sent = false;
- let mut latest_graph_cache_time: Option<Instant> = None;
+ let mut latest_graph_cache_time = Instant::now();
+ let insert_limiter = Arc::new(Semaphore::new(INSERT_PARALELLISM));
+ let connections_cache = Arc::new(Mutex::new(Vec::with_capacity(INSERT_PARALELLISM)));
+ #[cfg(test)]
+ let mut tasks_spawned = Vec::new();
// TODO: it would be nice to have some sort of timeout here so after 10 seconds of
// inactivity, some sort of message could be broadcast signaling the activation of request
// processing
- while let Some(detected_gossip_message) = &self.gossip_persistence_receiver.recv().await {
+ while let Some(gossip_message) = self.gossip_persistence_receiver.recv().await {
i += 1; // count the persisted gossip messages
- if i == 1 || i % persistence_log_threshold == 0 {
- println!("Persisting gossip message #{}", i);
+ if latest_persistence_log.elapsed().as_secs() >= 60 {
+ log_info!(self.logger, "Persisting gossip message #{}", i);
+ latest_persistence_log = Instant::now();
}
- if let Some(last_cache_time) = latest_graph_cache_time {
- // has it been ten minutes? Just cache it
- if last_cache_time.elapsed().as_secs() >= 600 {
- self.persist_network_graph();
- latest_graph_cache_time = Some(Instant::now());
- }
- } else {
- // initialize graph cache timer
- latest_graph_cache_time = Some(Instant::now());
+ // has it been ten minutes? Just cache it
+ if latest_graph_cache_time.elapsed().as_secs() >= 600 {
+ self.persist_network_graph();
+ latest_graph_cache_time = Instant::now();
}
+ insert_limiter.acquire().await.unwrap().forget();
- match &detected_gossip_message.message {
- GossipMessage::InitialSyncComplete => {
- // signal to the server that it may now serve dynamic responses and calculate
- // snapshots
- // we take this detour through the persister to ensure that all previous
- // messages have already been persisted to the database
- println!("Persister caught up with gossip!");
- i -= 1; // this wasn't an actual gossip message that needed persisting
- persistence_log_threshold = 50;
- if !server_sync_completion_sent {
- server_sync_completion_sent = true;
- self.server_sync_completion_sender.send(()).await.unwrap();
- println!("Server has been notified of persistence completion.");
- }
-
- // now, cache the persisted network graph
- // also persist the network graph here
- let mut too_soon = false;
- if let Some(latest_graph_cache_time) = latest_graph_cache_time {
- let time_since_last_cached = latest_graph_cache_time.elapsed().as_secs();
- // don't cache more frequently than every 2 minutes
- too_soon = time_since_last_cached < 120;
- }
- if too_soon {
- println!("Network graph has been cached too recently.");
- }else {
- latest_graph_cache_time = Some(Instant::now());
- self.persist_network_graph();
- }
- }
- GossipMessage::ChannelAnnouncement(announcement) => {
+ let limiter_ref = Arc::clone(&insert_limiter);
+ let client = {
+ let mut connections_set = connections_cache.lock().await;
+ let client = if connections_set.is_empty() {
+ crate::connect_to_db().await
+ } else {
+ connections_set.pop().unwrap()
+ };
+ client
+ };
- let scid = announcement.contents.short_channel_id;
- let scid_hex = hex_utils::hex_str(&scid.to_be_bytes());
- // scid is 8 bytes
- // block height is the first three bytes
- // to obtain block height, shift scid right by 5 bytes (40 bits)
- let block_height = (scid >> 5 * 8) as i32;
- let chain_hash = announcement.contents.chain_hash.as_ref();
- let chain_hash_hex = hex_utils::hex_str(chain_hash);
+ let connections_cache_ref = Arc::clone(&connections_cache);
+ match gossip_message {
+ GossipMessage::ChannelAnnouncement(announcement, seen_override) => {
+ let scid = announcement.contents.short_channel_id as i64;
// start with the type prefix, which is already known a priori
- let mut announcement_signed = Vec::new(); // vec![1, 0];
+ let mut announcement_signed = Vec::new();
announcement.write(&mut announcement_signed).unwrap();
- let result = client
- .execute("INSERT INTO channel_announcements (\
- short_channel_id, \
- block_height, \
- chain_hash, \
- announcement_signed \
- ) VALUES ($1, $2, $3, $4) ON CONFLICT (short_channel_id) DO NOTHING", &[
- &scid_hex,
- &block_height,
- &chain_hash_hex,
- &announcement_signed
- ]).await;
- if result.is_err() {
- panic!("error: {}", result.err().unwrap());
- }
+ let _task = self.tokio_runtime.spawn(async move {
+ if cfg!(test) && seen_override.is_some() {
+ tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+ .execute("INSERT INTO channel_announcements (\
+ short_channel_id, \
+ announcement_signed, \
+ seen \
+ ) VALUES ($1, $2, TO_TIMESTAMP($3)) ON CONFLICT (short_channel_id) DO NOTHING", &[
+ &scid,
+ &announcement_signed,
+ &(seen_override.unwrap() as f64)
+ ])).await.unwrap().unwrap();
+ } else {
+ tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+ .execute("INSERT INTO channel_announcements (\
+ short_channel_id, \
+ announcement_signed \
+ ) VALUES ($1, $2) ON CONFLICT (short_channel_id) DO NOTHING", &[
+ &scid,
+ &announcement_signed
+ ])).await.unwrap().unwrap();
+ }
+ let mut connections_set = connections_cache_ref.lock().await;
+ connections_set.push(client);
+ limiter_ref.add_permits(1);
+ });
+ #[cfg(test)]
+ tasks_spawned.push(_task);
}
- GossipMessage::ChannelUpdate(update) => {
- let scid = update.contents.short_channel_id;
- let scid_hex = hex_utils::hex_str(&scid.to_be_bytes());
-
- let chain_hash = update.contents.chain_hash.as_ref();
- let chain_hash_hex = hex_utils::hex_str(chain_hash);
+ GossipMessage::ChannelUpdate(update, seen_override) => {
+ let scid = update.contents.short_channel_id as i64;
let timestamp = update.contents.timestamp as i64;
- let channel_flags = update.contents.flags as i32;
- let direction = channel_flags & 1;
- let disable = (channel_flags & 2) > 0;
-
- let composite_index = format!("{}:{}:{}", scid_hex, timestamp, direction);
+ let direction = (update.contents.flags & 1) == 1;
+ let disable = (update.contents.flags & 2) > 0;
let cltv_expiry_delta = update.contents.cltv_expiry_delta as i32;
let htlc_minimum_msat = update.contents.htlc_minimum_msat as i64;
let htlc_maximum_msat = update.contents.htlc_maximum_msat as i64;
// start with the type prefix, which is already known a priori
- let mut update_signed = Vec::new(); // vec![1, 2];
+ let mut update_signed = Vec::new();
update.write(&mut update_signed).unwrap();
- let result = client
- .execute("INSERT INTO channel_updates (\
- composite_index, \
- chain_hash, \
+ let insertion_statement = if cfg!(test) {
+ "INSERT INTO channel_updates (\
short_channel_id, \
timestamp, \
+ seen, \
channel_flags, \
direction, \
disable, \
fee_proportional_millionths, \
htlc_maximum_msat, \
blob_signed \
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) ON CONFLICT (composite_index) DO NOTHING", &[
- &composite_index,
- &chain_hash_hex,
- &scid_hex,
- ×tamp,
- &channel_flags,
- &direction,
- &disable,
- &cltv_expiry_delta,
- &htlc_minimum_msat,
- &fee_base_msat,
- &fee_proportional_millionths,
- &htlc_maximum_msat,
- &update_signed
- ]).await;
- if result.is_err() {
- panic!("error: {}", result.err().unwrap());
- }
+ ) VALUES ($1, $2, TO_TIMESTAMP($3), $4, $5, $6, $7, $8, $9, $10, $11, $12) ON CONFLICT DO NOTHING"
+ } else {
+ "INSERT INTO channel_updates (\
+ short_channel_id, \
+ timestamp, \
+ channel_flags, \
+ direction, \
+ disable, \
+ cltv_expiry_delta, \
+ htlc_minimum_msat, \
+ fee_base_msat, \
+ fee_proportional_millionths, \
+ htlc_maximum_msat, \
+ blob_signed \
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) ON CONFLICT DO NOTHING"
+ };
+
+ // this may not be used outside test cfg
+ let _seen_timestamp = seen_override.unwrap_or(timestamp as u32) as f64;
+
+ let _task = self.tokio_runtime.spawn(async move {
+ tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+ .execute(insertion_statement, &[
+ &scid,
+ ×tamp,
+ #[cfg(test)]
+ &_seen_timestamp,
+ &(update.contents.flags as i16),
+ &direction,
+ &disable,
+ &cltv_expiry_delta,
+ &htlc_minimum_msat,
+ &fee_base_msat,
+ &fee_proportional_millionths,
+ &htlc_maximum_msat,
+ &update_signed
+ ])).await.unwrap().unwrap();
+ let mut connections_set = connections_cache_ref.lock().await;
+ connections_set.push(client);
+ limiter_ref.add_permits(1);
+ });
+ #[cfg(test)]
+ tasks_spawned.push(_task);
}
}
}
+ #[cfg(test)]
+ for task in tasks_spawned {
+ task.await.unwrap();
+ }
}
fn persist_network_graph(&self) {
- println!("Caching network graph…");
+ log_info!(self.logger, "Caching network graph…");
let cache_path = config::network_graph_cache_path();
let file = OpenOptions::new()
.create(true)
.truncate(true)
.open(&cache_path)
.unwrap();
- self.network_graph.remove_stale_channels();
+ self.network_graph.remove_stale_channels_and_tracking();
let mut writer = BufWriter::new(file);
self.network_graph.write(&mut writer).unwrap();
- println!("Cached network graph!");
+ writer.flush().unwrap();
+ log_info!(self.logger, "Cached network graph!");
}
}