Create first unit test: genericize the logger over Deref, route log output through log_info!, and give tests isolated Postgres schemas.
diff --git a/src/lib.rs b/src/lib.rs
index 363d4aeb9e94ddb2c8a1baddad45027eb2ac7129..1852911a68a02b18e32bc7bc2a4a7c86d9b1b667 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -12,12 +12,16 @@ extern crate core;
 use std::collections::{HashMap, HashSet};
 use std::fs::File;
 use std::io::BufReader;
+use std::ops::Deref;
 use std::sync::Arc;
+use lightning::log_info;
 
 use lightning::routing::gossip::{NetworkGraph, NodeId};
 use lightning::util::logger::Logger;
 use lightning::util::ser::{ReadableArgs, Writeable};
 use tokio::sync::mpsc;
+use tokio_postgres::{Client, NoTls};
+use crate::config::SYMLINK_GRANULARITY_INTERVAL;
 use crate::lookup::DeltaSet;
 
 use crate::persistence::GossipPersister;
@@ -37,15 +41,18 @@ mod verifier;
 
 pub mod types;
 
+#[cfg(test)]
+mod tests;
+
 /// The purpose of this prefix is to identify the serialization format, should other rapid gossip
 /// sync formats arise in the future.
 ///
 /// The fourth byte is the protocol version in case our format gets updated.
 const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
 
-pub struct RapidSyncProcessor<L: Logger> {
-       network_graph: Arc<NetworkGraph<Arc<L>>>,
-       logger: Arc<L>
+pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
+       network_graph: Arc<NetworkGraph<L>>,
+       logger: L
 }
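
Note on the new bound: `L: Deref where L::Target: Logger` is the pattern LDK uses throughout its own API, so callers can pass anything that dereferences to a `Logger`. A minimal sketch of constructing the processor under this bound, assuming `RGSSLogger` implements `Logger` and exposes a `new()` constructor:

```rust
use std::sync::Arc;

// Arc<RGSSLogger> satisfies `Deref<Target = RGSSLogger>` and is cheap to clone,
// which is exactly what the `L: Deref + Clone` bounds below ask for.
let logger = Arc::new(RGSSLogger::new());
let processor = RapidSyncProcessor::new(Arc::clone(&logger));
```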
 
 pub struct SerializedResponse {
@@ -57,18 +64,18 @@ pub struct SerializedResponse {
        pub update_count_incremental: u32,
 }
 
-impl<L: Logger + Send + Sync + 'static> RapidSyncProcessor<L> {
-       pub fn new(logger: Arc<L>) -> Self {
+impl<L: Deref + Clone + Send + Sync + 'static> RapidSyncProcessor<L> where L::Target: Logger {
+       pub fn new(logger: L) -> Self {
                let network = config::network();
                let network_graph = if let Ok(file) = File::open(&config::network_graph_cache_path()) {
-                       println!("Initializing from cached network graph…");
+                       log_info!(logger, "Initializing from cached network graph…");
                        let mut buffered_reader = BufReader::new(file);
                        let network_graph_result = NetworkGraph::read(&mut buffered_reader, logger.clone());
                        if let Ok(network_graph) = network_graph_result {
-                               println!("Initialized from cached network graph!");
+                               log_info!(logger, "Initialized from cached network graph!");
                                network_graph
                        } else {
-                               println!("Initialization from cached network graph failed: {}", network_graph_result.err().unwrap());
+                               log_info!(logger, "Initialization from cached network graph failed: {}", network_graph_result.err().unwrap());
                                NetworkGraph::new(network, logger.clone())
                        }
                } else {
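
For reference, `log_info!` works against any expression whose target implements LDK's `Logger` trait. A bare-bones sketch of a conforming logger, assuming the single-method trait shape the `lightning` crate exposes at this version (the real crate uses `RGSSLogger`):

```rust
use lightning::util::logger::{Logger, Record};

// Prints every record it receives; enough to satisfy the `L::Target: Logger` bound.
struct PrintLogger;

impl Logger for PrintLogger {
	fn log(&self, record: &Record) {
		println!("{:?} [{}]: {}", record.level, record.module_path, record.args);
	}
}
```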
@@ -82,16 +89,19 @@ impl<L: Logger + Send + Sync + 'static> RapidSyncProcessor<L> {
        }
 
        pub async fn start_sync(&self) {
+               log_info!(self.logger, "Starting Rapid Gossip Sync Server");
+               log_info!(self.logger, "Snapshot interval: {} seconds", config::snapshot_generation_interval());
+
                // means to indicate sync completion status within this module
                let (sync_completion_sender, mut sync_completion_receiver) = mpsc::channel::<()>(1);
 
                if config::DOWNLOAD_NEW_GOSSIP {
-                       let (mut persister, persistence_sender) = GossipPersister::new(Arc::clone(&self.network_graph));
+                       let (mut persister, persistence_sender) = GossipPersister::new(self.network_graph.clone(), self.logger.clone());
 
-                       println!("Starting gossip download");
+                       log_info!(self.logger, "Starting gossip download");
                        tokio::spawn(tracking::download_gossip(persistence_sender, sync_completion_sender,
-                               Arc::clone(&self.network_graph), Arc::clone(&self.logger)));
-                       println!("Starting gossip db persistence listener");
+                               Arc::clone(&self.network_graph), self.logger.clone()));
+                       log_info!(self.logger, "Starting gossip db persistence listener");
                        tokio::spawn(async move { persister.persist_gossip().await; });
                } else {
                        sync_completion_sender.send(()).await.unwrap();
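
The sync-completion plumbing above is a one-shot signal built on a bounded channel. A standalone sketch of the pattern, separate from this crate:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
	// Capacity-1 channel: the producer sends exactly once; if every sender is
	// dropped without sending, the receiver sees `None` and treats it as failure.
	let (sender, mut receiver) = mpsc::channel::<()>(1);

	tokio::spawn(async move {
		// ... download and persist gossip here ...
		sender.send(()).await.unwrap();
	});

	if receiver.recv().await.is_none() {
		panic!("Sync failed!");
	}
}
```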
@@ -101,13 +111,35 @@ impl<L: Logger + Send + Sync + 'static> RapidSyncProcessor<L> {
                if sync_completion.is_none() {
                        panic!("Sync failed!");
                }
-               println!("Initial sync complete!");
+               log_info!(self.logger, "Initial sync complete!");
 
                // start the gossip snapshotting service
-               Snapshotter::new(Arc::clone(&self.network_graph)).snapshot_gossip().await;
+               Snapshotter::new(Arc::clone(&self.network_graph), self.logger.clone()).snapshot_gossip().await;
        }
 }
 
+pub(crate) async fn connect_to_db() -> Client {
+       let connection_config = config::db_connection_config();
+       let (client, connection) = connection_config.connect(NoTls).await.unwrap();
+
+       tokio::spawn(async move {
+               if let Err(e) = connection.await {
+                       panic!("connection error: {}", e);
+               }
+       });
+
+       #[cfg(test)]
+       {
+               let schema_name = tests::db_test_schema();
+               let schema_creation_command = format!("CREATE SCHEMA IF NOT EXISTS {}", schema_name);
+               client.execute(&schema_creation_command, &[]).await.unwrap();
+               client.execute(&format!("SET search_path TO {}", schema_name), &[]).await.unwrap();
+       }
+
+       client.execute("set time zone UTC", &[]).await.unwrap();
+       client
+}
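+
+Because `connect_to_db` switches `search_path` to a per-test schema under `cfg(test)`, each test can create tables without colliding with concurrently running tests. A hypothetical test illustrating the effect (the table name is illustrative, and `tests::db_test_schema()` is assumed to return a per-test schema name):
+
```rust
#[tokio::test]
async fn test_runs_in_isolated_schema() {
	let client = crate::connect_to_db().await;
	// This table lands in the schema selected via search_path above, so another
	// test running against its own schema cannot collide with it.
	client.execute("CREATE TABLE IF NOT EXISTS demo (id SERIAL PRIMARY KEY)", &[])
		.await.unwrap();
}
```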
+
 /// This method generates a no-op blob that can be used as a delta where none exists.
 ///
 /// The primary purpose of this method is the scenario of a client retrieving and processing a
@@ -129,7 +161,7 @@ fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
        let chain_hash = genesis_block.block_hash();
        chain_hash.write(&mut blob).unwrap();
 
-       let blob_timestamp = Snapshotter::<RGSSLogger>::round_down_to_nearest_multiple(current_timestamp, config::SNAPSHOT_CALCULATION_INTERVAL as u64) as u32;
+       let blob_timestamp = Snapshotter::<Arc<RGSSLogger>>::round_down_to_nearest_multiple(current_timestamp, SYMLINK_GRANULARITY_INTERVAL as u64) as u32;
        blob_timestamp.write(&mut blob).unwrap();
 
        0u32.write(&mut blob).unwrap(); // node count
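
`round_down_to_nearest_multiple` is plain modulo arithmetic: subtract the remainder so the value lands on the previous multiple of the interval. A self-contained sketch matching that observable behavior:

```rust
fn round_down_to_nearest_multiple(number: u64, multiple: u64) -> u64 {
	// Drop the remainder; e.g. 125 rounded down to a multiple of 60 is 120.
	number - (number % multiple)
}

fn main() {
	assert_eq!(round_down_to_nearest_multiple(125, 60), 120);
	assert_eq!(round_down_to_nearest_multiple(120, 60), 120);
}
```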
@@ -139,18 +171,13 @@ fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
        blob
 }
 
-async fn serialize_delta<L: Logger>(network_graph: Arc<NetworkGraph<Arc<L>>>, last_sync_timestamp: u32) -> SerializedResponse {
-       let (client, connection) = lookup::connect_to_db().await;
+async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L) -> SerializedResponse where L::Target: Logger {
+       let client = connect_to_db().await;
 
        network_graph.remove_stale_channels_and_tracking();
 
-       tokio::spawn(async move {
-               if let Err(e) = connection.await {
-                       panic!("connection error: {}", e);
-               }
-       });
-
        let mut output: Vec<u8> = vec![];
+       let snapshot_interval = config::snapshot_generation_interval();
 
        // set a flag if the chain hash is prepended
        // chain hash only necessary if either channel announcements or non-incremental updates are present
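
With the new signature, call sites thread the logger through by value alongside the graph. A hedged sketch of an invocation from within this crate, assuming an async context plus the crate's `RGSSLogger` and `config::network()`:

```rust
use std::sync::Arc;
use lightning::routing::gossip::NetworkGraph;

async fn example(last_sync_timestamp: u32) {
	// Hypothetical call site; RGSSLogger::new() is assumed from this crate.
	let logger = Arc::new(RGSSLogger::new());
	let network_graph = Arc::new(NetworkGraph::new(config::network(), logger.clone()));
	let response = serialize_delta(network_graph, last_sync_timestamp, logger.clone()).await;
	// response.data holds the prefixed, serialized delta blob.
	assert!(response.data.len() >= GOSSIP_PREFIX.len());
}
```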
@@ -173,12 +200,12 @@ async fn serialize_delta<L: Logger>(network_graph: Arc<NetworkGraph<Arc<L>>>, la
        };
 
        let mut delta_set = DeltaSet::new();
-       lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp).await;
-       println!("announcement channel count: {}", delta_set.len());
-       lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp).await;
-       println!("update-fetched channel count: {}", delta_set.len());
-       lookup::filter_delta_set(&mut delta_set);
-       println!("update-filtered channel count: {}", delta_set.len());
+       lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, logger.clone()).await;
+       log_info!(logger, "announcement channel count: {}", delta_set.len());
+       lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
+       log_info!(logger, "update-fetched channel count: {}", delta_set.len());
+       lookup::filter_delta_set(&mut delta_set, logger.clone());
+       log_info!(logger, "update-filtered channel count: {}", delta_set.len());
        let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);
 
        // process announcements
@@ -236,7 +263,7 @@ async fn serialize_delta<L: Logger>(network_graph: Arc<NetworkGraph<Arc<L>>>, la
        serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
        // always write the latest seen timestamp
        let latest_seen_timestamp = serialization_details.latest_seen;
-       let overflow_seconds = latest_seen_timestamp % config::SNAPSHOT_CALCULATION_INTERVAL;
+       let overflow_seconds = latest_seen_timestamp % snapshot_interval;
        let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
        serialized_seen_timestamp.write(&mut prefixed_output).unwrap();
 
@@ -249,8 +276,8 @@ async fn serialize_delta<L: Logger>(network_graph: Arc<NetworkGraph<Arc<L>>>, la
 
        prefixed_output.append(&mut output);
 
-       println!("duplicated node ids: {}", duplicate_node_ids);
-       println!("latest seen timestamp: {:?}", serialization_details.latest_seen);
+       log_info!(logger, "duplicated node ids: {}", duplicate_node_ids);
+       log_info!(logger, "latest seen timestamp: {:?}", serialization_details.latest_seen);
 
        SerializedResponse {
                data: prefixed_output,