Merge pull request #54 from arik-so/2023/08/incremental-update-fix
author     Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
           Thu, 17 Aug 2023 20:55:48 +0000 (20:55 +0000)
committer  GitHub <noreply@github.com>
           Thu, 17 Aug 2023 20:55:48 +0000 (20:55 +0000)
Only send full updates with announcements

src/config.rs
src/lib.rs
src/lookup.rs
src/persistence.rs
src/serialization.rs

diff --git a/src/config.rs b/src/config.rs
index 804b0f8ff8a66262b7273702239b3ac645d24fae..db6d2ef7a243d69de87d41963d3087f3543cb88c 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -15,7 +15,7 @@ use lightning::util::ser::Readable;
 use lightning_block_sync::http::HttpEndpoint;
 use tokio_postgres::Config;
 
-pub(crate) const SCHEMA_VERSION: i32 = 11;
+pub(crate) const SCHEMA_VERSION: i32 = 12;
 pub(crate) const SNAPSHOT_CALCULATION_INTERVAL: u32 = 3600 * 24; // every 24 hours, in seconds
 /// If the last update in either direction was more than six days ago, we send a reminder
 /// That reminder may be either in the form of a channel announcement, or in the form of empty
@@ -117,11 +117,12 @@ pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
 
 pub(crate) fn db_index_creation_query() -> &'static str {
        "
-       CREATE INDEX IF NOT EXISTS channel_updates_seen_with_id_direction_blob ON channel_updates(seen) INCLUDE (id, direction, blob_signed);
        CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
        CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_asc ON channel_updates(short_channel_id, direction, seen);
        CREATE INDEX IF NOT EXISTS channel_updates_scid_dir_seen_desc_with_id ON channel_updates(short_channel_id ASC, direction ASC, seen DESC) INCLUDE (id);
        CREATE UNIQUE INDEX IF NOT EXISTS channel_updates_key ON channel_updates (short_channel_id, direction, timestamp);
+       CREATE INDEX IF NOT EXISTS channel_updates_seen ON channel_updates(seen);
+       CREATE INDEX IF NOT EXISTS channel_updates_timestamp_desc ON channel_updates(timestamp DESC);
        "
 }
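
The dropped covering index (seen, with id, direction, and blob_signed as payload columns) is replaced by two narrower indexes matching the query shapes this change introduces: a plain index on seen for the range filter, and a descending index on timestamp for the new ORDER BY timestamp DESC in lookup.rs. A minimal sketch of the kind of query those two indexes serve, assuming a connected tokio_postgres::Client (standalone illustration, not code from this change):

        // Fetch update rows seen since the last sync, newest timestamp first.
        // The seen index serves the WHERE clause; the descending timestamp
        // index serves the ORDER BY. Error handling elided for brevity.
        async fn recent_updates(client: &tokio_postgres::Client, last_sync: f64) -> Vec<tokio_postgres::Row> {
                client.query(
                        "SELECT id, direction, blob_signed FROM channel_updates \
                        WHERE seen >= TO_TIMESTAMP($1) ORDER BY timestamp DESC",
                        &[&last_sync],
                ).await.unwrap()
        }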
 
@@ -254,6 +255,12 @@ pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client)
                tx.execute("UPDATE config SET db_schema = 11 WHERE id = 1", &[]).await.unwrap();
                tx.commit().await.unwrap();
        }
+       if schema >= 1 && schema <= 11 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("DROP INDEX IF EXISTS channel_updates_seen_with_id_direction_blob", &[]).await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 12 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
        if schema <= 1 || schema > SCHEMA_VERSION {
                panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
        }
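
Each schema migration step follows the same pattern: guard on the range of schema versions that still need the step, then perform the DDL and the config-row bump inside a single transaction, so an interrupted upgrade cannot record schema 12 without actually having dropped the index. A sketch of what a future step would look like under this pattern (schema 13 is invented here purely for illustration):

        // Hypothetical next migration step; schema 13 does not exist in this change.
        if schema >= 1 && schema <= 12 {
                let tx = client.transaction().await.unwrap();
                // DDL for the hypothetical schema-13 change would go here.
                tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
                tx.commit().await.unwrap();
        }
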
diff --git a/src/lib.rs b/src/lib.rs
index b6281bcb6836b58f7ecff75664ba6075bde8df13..550ed79ddcdac4d56f65e93e5ac94f7fa030bfa7 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -20,6 +20,7 @@ use lightning::routing::gossip::{NetworkGraph, NodeId};
 use lightning::util::logger::Logger;
 use lightning::util::ser::{ReadableArgs, Writeable};
 use tokio::sync::mpsc;
+use tokio_postgres::{Client, NoTls};
 use crate::lookup::DeltaSet;
 
 use crate::persistence::GossipPersister;
@@ -110,6 +111,20 @@ impl<L: Deref + Clone + Send + Sync + 'static> RapidSyncProcessor<L> where L::Ta
        }
 }
 
+pub(crate) async fn connect_to_db() -> Client {
+       let connection_config = config::db_connection_config();
+       let (client, connection) = connection_config.connect(NoTls).await.unwrap();
+
+       tokio::spawn(async move {
+               if let Err(e) = connection.await {
+                       panic!("connection error: {}", e);
+               }
+       });
+
+       client.execute("set time zone UTC", &[]).await.unwrap();
+       client
+}
+
 /// This method generates a no-op blob that can be used as a delta where none exists.
 ///
 /// The primary purpose of this method is the scenario of a client retrieving and processing a
@@ -142,16 +157,10 @@ fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
 }
 
 async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L) -> SerializedResponse where L::Target: Logger {
-       let (client, connection) = lookup::connect_to_db().await;
+       let client = connect_to_db().await;
 
        network_graph.remove_stale_channels_and_tracking();
 
-       tokio::spawn(async move {
-               if let Err(e) = connection.await {
-                       panic!("connection error: {}", e);
-               }
-       });
-
        let mut output: Vec<u8> = vec![];
 
        // set a flag if the chain hash is prepended
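
connect_to_db consolidates connection boilerplate that lookup.rs and persistence.rs previously duplicated: the connection driver future is spawned onto the runtime so the returned Client is immediately usable, and the session is pinned to UTC before the client is handed out. Callers such as serialize_delta above now reduce to a single await. A minimal usage sketch (the function name count_updates is invented for illustration):

        async fn count_updates() -> i64 {
                // One call yields a ready-to-use client with its driver task running.
                let client = crate::connect_to_db().await;
                let row = client.query_one("SELECT COUNT(*) FROM channel_updates", &[]).await.unwrap();
                row.get(0)
        }
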
diff --git a/src/lookup.rs b/src/lookup.rs
index 90591240b97a75eecb5c9482f20fb442252e8f33..696b4d041ab508b53c7074884a63b89047d4957a 100644
--- a/src/lookup.rs
+++ b/src/lookup.rs
@@ -7,8 +7,7 @@ use std::time::{Instant, SystemTime, UNIX_EPOCH};
 use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
 use lightning::routing::gossip::NetworkGraph;
 use lightning::util::ser::Readable;
-use tokio_postgres::{Client, Connection, NoTls, Socket};
-use tokio_postgres::tls::NoTlsStream;
+use tokio_postgres::Client;
 
 use futures::StreamExt;
 use lightning::log_info;
@@ -68,11 +67,6 @@ impl Default for DirectedUpdateDelta {
        }
 }
 
-pub(super) async fn connect_to_db() -> (Client, Connection<Socket, NoTlsStream>) {
-       let connection_config = config::db_connection_config();
-       connection_config.connect(NoTls).await.unwrap()
-}
-
 /// Fetch all the channel announcements that are presently in the network graph, regardless of
 /// whether they had been seen before.
 /// Also include all announcements for which the first update was announced
@@ -288,6 +282,7 @@ pub(super) async fn fetch_channel_updates<L: Deref>(delta_set: &mut DeltaSet, cl
                SELECT id, direction, blob_signed, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen
                FROM channel_updates
                WHERE seen >= TO_TIMESTAMP($1)
+               ORDER BY timestamp DESC
                ", [last_sync_timestamp_float]).await.unwrap();
        let mut pinned_updates = Box::pin(intermediate_updates);
        log_info!(logger, "Fetched intermediate rows in {:?}", start.elapsed());
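
Ordering the intermediate updates by timestamp DESC means that, as rows stream in, the first row encountered for any (short_channel_id, direction) key carries the latest update, so a consumer can keep first-seen rows and skip the rest; the channel_updates_timestamp_desc index added in config.rs keeps this sort cheap. A hedged sketch of that first-row-wins consumption pattern (column names and types are assumptions for illustration; in particular, direction is assumed to be boolean):

        use std::collections::HashSet;
        use futures::StreamExt;

        async fn keep_newest_per_key(mut rows: std::pin::Pin<Box<tokio_postgres::RowStream>>) {
                let mut seen_keys: HashSet<(i64, bool)> = HashSet::new();
                while let Some(row) = rows.next().await {
                        let row = row.unwrap();
                        let key: (i64, bool) = (row.get("short_channel_id"), row.get("direction"));
                        if !seen_keys.insert(key) {
                                continue; // an older update for this key: skip it
                        }
                        // handle the newest update for this key here
                }
        }
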
diff --git a/src/persistence.rs b/src/persistence.rs
index f638894dd3590c9d0c583452c7e03aba8275cb15..8bb7b188af72fb692aa218a20b1ede9bd24c116a 100644
--- a/src/persistence.rs
+++ b/src/persistence.rs
@@ -8,7 +8,6 @@ use lightning::routing::gossip::NetworkGraph;
 use lightning::util::logger::Logger;
 use lightning::util::ser::Writeable;
 use tokio::sync::mpsc;
-use tokio_postgres::NoTls;
 
 use crate::config;
 use crate::types::GossipMessage;
@@ -33,15 +32,7 @@ impl<L: Deref> GossipPersister<L> where L::Target: Logger {
        }
 
        pub(crate) async fn persist_gossip(&mut self) {
-               let connection_config = config::db_connection_config();
-               let (mut client, connection) =
-                       connection_config.connect(NoTls).await.unwrap();
-
-               tokio::spawn(async move {
-                       if let Err(e) = connection.await {
-                               panic!("connection error: {}", e);
-                       }
-               });
+               let mut client = crate::connect_to_db().await;
 
                {
                        // initialize the database
@@ -57,6 +48,11 @@ impl<L: Deref> GossipPersister<L> where L::Target: Logger {
                                config::upgrade_db(cur_schema[0].get(0), &mut client).await;
                        }
 
+                       let preparation = client.execute("set time zone UTC", &[]).await;
+                       if let Err(preparation_error) = preparation {
+                               panic!("db preparation error: {}", preparation_error);
+                       }
+
                        let initialization = client
                                .execute(
                                        // TODO: figure out a way to fix the id value without Postgres complaining about
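
The persister now shares the same connection path and then verifies the session setup explicitly, panicking with a descriptive message rather than a bare unwrap if setting the time zone fails; the schema check runs before that, keeping upgrades ahead of any writes. Condensed, the initialization sequence mirrored by persist_gossip looks like this (sketch only; error handling and surrounding setup elided):

        let mut client = crate::connect_to_db().await;
        let cur_schema = client.query("SELECT db_schema FROM config WHERE id = 1", &[]).await.unwrap();
        if !cur_schema.is_empty() {
                config::upgrade_db(cur_schema[0].get(0), &mut client).await;
        }
        if let Err(preparation_error) = client.execute("set time zone UTC", &[]).await {
                panic!("db preparation error: {}", preparation_error);
        }
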
diff --git a/src/serialization.rs b/src/serialization.rs
index 4c1d82879d9b7f281eb2a1c3eaa9ca6ce6f06dd4..86e114bdb8466e9b29a8164b098eb3036aa57794 100644
--- a/src/serialization.rs
+++ b/src/serialization.rs
@@ -140,12 +140,12 @@ pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32)
 
                let current_announcement_seen = channel_announcement_delta.seen;
                let is_new_announcement = current_announcement_seen >= last_sync_timestamp;
-               let is_newly_updated_announcement = if let Some(first_update_seen) = channel_delta.first_bidirectional_updates_seen {
+               let is_newly_included_announcement = if let Some(first_update_seen) = channel_delta.first_bidirectional_updates_seen {
                        first_update_seen >= last_sync_timestamp
                } else {
                        false
                };
-               let send_announcement = is_new_announcement || is_newly_updated_announcement;
+               let send_announcement = is_new_announcement || is_newly_included_announcement;
                if send_announcement {
                        serialization_set.latest_seen = max(serialization_set.latest_seen, current_announcement_seen);
                        serialization_set.announcements.push(channel_delta.announcement.unwrap().announcement);
@@ -166,7 +166,7 @@ pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32)
 
                                        if updates.last_update_before_seen.is_some() {
                                                let mutated_properties = updates.mutated_properties;
-                                               if mutated_properties.len() == 5 {
+                                               if mutated_properties.len() == 5 || send_announcement {
                                                        // all five values have changed, it makes more sense to just
                                                        // serialize the update as a full update instead of as a change
                                                        // this way, the default values can be computed more efficiently
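
Taken together with the rename above, this condition is the fix the commit title describes: previously an update was serialized in full only when all five mutated properties had changed; now any update that accompanies an announced channel is also sent in full, because a client learning about the channel from this snapshot has no previous update to apply an incremental change against. Reduced to a standalone predicate (a sketch with invented names, not this repository's API):

        /// Whether an update should be serialized as a full update rather than
        /// as an incremental change against the client's last-known state.
        fn should_send_full_update(mutated_property_count: usize, send_announcement: bool) -> bool {
                // Either every update field changed (a delta saves nothing), or the
                // channel announcement ships in this delta, so there is no prior
                // update on the client side to diff against.
                mutated_property_count == 5 || send_announcement
        }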