Correct v2 symlink paths (main)
author    Matt Corallo <git@bluematt.me>    Wed, 29 May 2024 14:54:57 +0000 (14:54 +0000)
committer Matt Corallo <git@bluematt.me>    Wed, 29 May 2024 14:54:57 +0000 (14:54 +0000)
Cargo.toml
src/config.rs
src/downloader.rs
src/lib.rs
src/lookup.rs
src/persistence.rs
src/serialization.rs
src/snapshot.rs
src/tests/mod.rs
src/types.rs

diff --git a/Cargo.toml b/Cargo.toml
index ee31730bff17937322020a763a334313e8c04242..1fee6cd2a8fdf71fb9d593a5e13732a2ef665543 100644
@@ -6,16 +6,16 @@ edition = "2021"
 [dependencies]
 bitcoin = "0.30"
 hex-conservative = "0.2"
-lightning = { version = "0.0.121" }
-lightning-block-sync = { version = "0.0.121", features=["rest-client"] }
-lightning-net-tokio = { version = "0.0.121" }
+lightning = { version = "0.0.123" }
+lightning-block-sync = { version = "0.0.123", features=["rest-client"] }
+lightning-net-tokio = { version = "0.0.123" }
 tokio = { version = "1.25", features = ["full"] }
 tokio-postgres = { version = "=0.7.5" }
 futures = "0.3"
 
 [dev-dependencies]
-lightning = { version = "0.0.121", features = ["_test_utils"] }
-lightning-rapid-gossip-sync = { version = "0.0.121" }
+lightning = { version = "0.0.123", features = ["_test_utils"] }
+lightning-rapid-gossip-sync = { version = "0.0.123" }
 
 [profile.dev]
 panic = "abort"
diff --git a/src/config.rs b/src/config.rs
index 0941e78953424e0619934a2ab02b4e91de170260..4950ec07a055d311abc9291a66799157decedc28 100644
@@ -14,7 +14,7 @@ use lightning::util::ser::Readable;
 use lightning_block_sync::http::HttpEndpoint;
 use tokio_postgres::Config;
 
-pub(crate) const SCHEMA_VERSION: i32 = 13;
+pub(crate) const SCHEMA_VERSION: i32 = 14;
 pub(crate) const SYMLINK_GRANULARITY_INTERVAL: u32 = 3600 * 3; // three hours
 pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
 // generate symlinks based on a 3-hour-granularity
@@ -22,6 +22,10 @@ pub(crate) const MAX_SNAPSHOT_SCOPE: u32 = 3600 * 24 * 21; // three weeks
 /// That reminder may be either in the form of a channel announcement, or in the form of empty
 /// updates in both directions.
 pub(crate) const CHANNEL_REMINDER_AGE: Duration = Duration::from_secs(6 * 24 * 60 * 60);
+
+/// Maximum number of default feature sets to calculate for node announcements
+pub(crate) const NODE_DEFAULT_FEATURE_COUNT: u8 = 6;
+
 /// The number of successful peer connections to await prior to continuing to gossip storage.
 /// The application will still work if the number of specified peers is lower, as long as there is
 /// at least one successful peer connection, but it may result in long startup times.
@@ -135,6 +139,18 @@ pub(crate) fn db_channel_update_table_creation_query() -> &'static str {
        )"
 }
 
+pub(crate) fn db_node_announcement_table_creation_query() -> &'static str {
+       "CREATE TABLE IF NOT EXISTS node_announcements (
+               id SERIAL PRIMARY KEY,
+               public_key varchar(66) NOT NULL,
+               features BYTEA NOT NULL,
+               socket_addresses BYTEA NOT NULL,
+               timestamp bigint NOT NULL,
+               announcement_signed BYTEA,
+               seen timestamp NOT NULL DEFAULT NOW()
+       )"
+}
+
 pub(crate) fn db_index_creation_query() -> &'static str {
        "
        CREATE INDEX IF NOT EXISTS channel_updates_seen_scid ON channel_updates(seen, short_channel_id);
@@ -287,6 +303,11 @@ pub(crate) async fn upgrade_db(schema: i32, client: &mut tokio_postgres::Client)
                tx.execute("UPDATE config SET db_schema = 13 WHERE id = 1", &[]).await.unwrap();
                tx.commit().await.unwrap();
        }
+       if schema >= 1 && schema <= 13 {
+               let tx = client.transaction().await.unwrap();
+               tx.execute("UPDATE config SET db_schema = 14 WHERE id = 1", &[]).await.unwrap();
+               tx.commit().await.unwrap();
+       }
        if schema <= 1 || schema > SCHEMA_VERSION {
                panic!("Unknown schema in db: {}, we support up to {}", schema, SCHEMA_VERSION);
        }
@@ -372,7 +393,7 @@ mod tests {
                // Set the environment variable, including a repeated comma, leading space, and trailing comma.
                std::env::set_var("LN_PEERS", "035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226@170.75.163.209:9735,, 035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc227@170.75.163.210:9735,");
                let peers = ln_peers();
-               
+
                // Assert output is as expected
                assert_eq!(
                        peers,
diff --git a/src/downloader.rs b/src/downloader.rs
index af854c8317d059444b9ee2df6598bb1edbeeccb7..49e3019b858f362b0192ffd409835950ca6826db 100644
@@ -14,6 +14,7 @@ use crate::types::{GossipMessage, GossipChainAccess, GossipPeerManager};
 use crate::verifier::ChainVerifier;
 
 pub(crate) struct GossipCounter {
+       pub(crate) node_announcements: u64,
        pub(crate) channel_announcements: u64,
        pub(crate) channel_updates: u64,
        pub(crate) channel_updates_without_htlc_max_msats: u64,
@@ -23,6 +24,7 @@ pub(crate) struct GossipCounter {
 impl GossipCounter {
        pub(crate) fn new() -> Self {
                Self {
+                       node_announcements: 0,
                        channel_announcements: 0,
                        channel_updates: 0,
                        channel_updates_without_htlc_max_msats: 0,
@@ -71,6 +73,21 @@ impl<L: Deref + Clone + Send + Sync> GossipRouter<L> where L::Target: Logger {
                }
        }
 
+       fn new_node_announcement(&self, msg: NodeAnnouncement) {
+               {
+                       let mut counter = self.counter.write().unwrap();
+                       counter.node_announcements += 1;
+               }
+
+               let gossip_message = GossipMessage::NodeAnnouncement(msg, None);
+               if let Err(err) = self.sender.try_send(gossip_message) {
+                       let gossip_message = match err { TrySendError::Full(msg)|TrySendError::Closed(msg) => msg };
+                       tokio::task::block_in_place(move || { tokio::runtime::Handle::current().block_on(async move {
+                               self.sender.send(gossip_message).await.unwrap();
+                       })});
+               }
+       }
+
        fn new_channel_update(&self, msg: ChannelUpdate) {
                self.counter.write().unwrap().channel_updates += 1;
                let gossip_message = GossipMessage::ChannelUpdate(msg, None);
@@ -92,7 +109,9 @@ impl<L: Deref + Clone + Send + Sync> MessageSendEventsProvider for GossipRouter<
                                MessageSendEvent::BroadcastChannelAnnouncement { msg, .. } => {
                                        self.new_channel_announcement(msg);
                                },
-                               MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
+                               MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+                                       self.new_node_announcement(msg);
+                               },
                                MessageSendEvent::BroadcastChannelUpdate { msg } => {
                                        self.new_channel_update(msg);
                                },
@@ -105,7 +124,9 @@ impl<L: Deref + Clone + Send + Sync> MessageSendEventsProvider for GossipRouter<
 
 impl<L: Deref + Clone + Send + Sync> RoutingMessageHandler for GossipRouter<L> where L::Target: Logger {
        fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError> {
-               self.native_router.handle_node_announcement(msg)
+               let res = self.native_router.handle_node_announcement(msg)?;
+               self.new_node_announcement(msg.clone());
+               Ok(res)
        }
 
        fn handle_channel_announcement(&self, msg: &ChannelAnnouncement) -> Result<bool, LightningError> {
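
A note on the send path added above: new_node_announcement follows the same backpressure pattern as the existing channel handlers, trying a non-blocking try_send first and falling back to a blocking send (via block_in_place) only when the bounded channel is full, so gossip is delayed rather than dropped. A minimal sketch of that pattern in isolation; the send_or_block helper name is illustrative and not part of this codebase:

	use tokio::sync::mpsc::{self, error::TrySendError};

	fn send_or_block<T: Send + 'static>(sender: &mpsc::Sender<T>, message: T) {
		if let Err(err) = sender.try_send(message) {
			// Recover the message from the error so it can be retried.
			let message = match err {
				TrySendError::Full(msg) | TrySendError::Closed(msg) => msg,
			};
			// Requires a multi-threaded Tokio runtime; blocks this worker thread until the
			// receiver frees capacity (and panics if the receiver was dropped, mirroring the
			// unwrap in the code above).
			tokio::task::block_in_place(|| {
				tokio::runtime::Handle::current().block_on(async {
					sender.send(message).await.unwrap();
				})
			});
		}
	}
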
diff --git a/src/lib.rs b/src/lib.rs
index 64f0ff24616ec619cd95cbebe71b8808feb06f88..58fcc76fb50c70407e7d56abd521f9728d411c17 100644
@@ -26,7 +26,7 @@ use crate::config::SYMLINK_GRANULARITY_INTERVAL;
 use crate::lookup::DeltaSet;
 
 use crate::persistence::GossipPersister;
-use crate::serialization::UpdateSerialization;
+use crate::serialization::{SerializationSet, UpdateSerialization};
 use crate::snapshot::Snapshotter;
 use crate::types::RGSSLogger;
 
@@ -49,7 +49,7 @@ mod tests;
 /// sync formats arise in the future.
 ///
 /// The fourth byte is the protocol version in case our format gets updated.
-const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
+const GOSSIP_PREFIX: [u8; 3] = [76, 68, 75];
 
 pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
        network_graph: Arc<NetworkGraph<L>>,
@@ -59,7 +59,13 @@ pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
 pub struct SerializedResponse {
        pub data: Vec<u8>,
        pub message_count: u32,
-       pub announcement_count: u32,
+       pub node_announcement_count: u32,
+       /// Despite the name, this counts node announcements that carry an associated update, whether
+       /// to features, addresses, or both
+       pub node_update_count: u32,
+       pub node_feature_update_count: u32,
+       pub node_address_update_count: u32,
+       pub channel_announcement_count: u32,
        pub update_count: u32,
        pub update_count_full: u32,
        pub update_count_incremental: u32,
@@ -171,18 +177,31 @@ fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
        blob
 }
 
-async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> SerializedResponse where L::Target: Logger {
+async fn calculate_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> SerializationSet where L::Target: Logger {
        let client = connect_to_db().await;
 
        network_graph.remove_stale_channels_and_tracking();
 
-       let mut output: Vec<u8> = vec![];
-       let snapshot_interval = config::snapshot_generation_interval();
-
        // set a flag if the chain hash is prepended
        // chain hash only necessary if either channel announcements or non-incremental updates are present
        // for announcement-free incremental-only updates, chain hash can be skipped
 
+       let mut delta_set = DeltaSet::new();
+       lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
+       log_info!(logger, "announcement channel count: {}", delta_set.len());
+       lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
+       log_info!(logger, "update-fetched channel count: {}", delta_set.len());
+       let node_delta_set = lookup::fetch_node_updates(&client, last_sync_timestamp, logger.clone()).await;
+       log_info!(logger, "update-fetched node count: {}", node_delta_set.len());
+       lookup::filter_delta_set(&mut delta_set, logger.clone());
+       log_info!(logger, "update-filtered channel count: {}", delta_set.len());
+       serialization::serialize_delta_set(delta_set, node_delta_set, last_sync_timestamp)
+}
+
+fn serialize_delta<L: Deref + Clone>(serialization_details: &SerializationSet, serialization_version: u8, logger: L) -> SerializedResponse where L::Target: Logger {
+       let mut output: Vec<u8> = vec![];
+       let snapshot_interval = config::snapshot_generation_interval();
+
        let mut node_id_set: HashSet<NodeId> = HashSet::new();
        let mut node_id_indices: HashMap<NodeId, usize> = HashMap::new();
        let mut node_ids: Vec<NodeId> = Vec::new();
@@ -199,21 +218,12 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
                node_id_indices[&node_id]
        };
 
-       let mut delta_set = DeltaSet::new();
-       lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
-       log_info!(logger, "announcement channel count: {}", delta_set.len());
-       lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
-       log_info!(logger, "update-fetched channel count: {}", delta_set.len());
-       lookup::filter_delta_set(&mut delta_set, logger.clone());
-       log_info!(logger, "update-filtered channel count: {}", delta_set.len());
-       let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);
-
        // process announcements
        // write the number of channel announcements to the output
        let announcement_count = serialization_details.announcements.len() as u32;
        announcement_count.write(&mut output).unwrap();
        let mut previous_announcement_scid = 0;
-       for current_announcement in serialization_details.announcements {
+       for current_announcement in &serialization_details.announcements {
                let id_index_1 = get_node_id_index(current_announcement.node_id_1);
                let id_index_2 = get_node_id_index(current_announcement.node_id_2);
                let mut stripped_announcement = serialization::serialize_stripped_channel_announcement(&current_announcement, id_index_1, id_index_2, previous_announcement_scid);
@@ -227,7 +237,7 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
        let update_count = serialization_details.updates.len() as u32;
        update_count.write(&mut output).unwrap();
 
-       let default_update_values = serialization_details.full_update_defaults;
+       let default_update_values = &serialization_details.full_update_defaults;
        if update_count > 0 {
                default_update_values.cltv_expiry_delta.write(&mut output).unwrap();
                default_update_values.htlc_minimum_msat.write(&mut output).unwrap();
@@ -238,7 +248,7 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
 
        let mut update_count_full = 0;
        let mut update_count_incremental = 0;
-       for current_update in serialization_details.updates {
+       for current_update in &serialization_details.updates {
                match &current_update {
                        UpdateSerialization::Full(_) => {
                                update_count_full += 1;
@@ -258,6 +268,7 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
        let message_count = announcement_count + update_count;
 
        let mut prefixed_output = GOSSIP_PREFIX.to_vec();
+       prefixed_output.push(serialization_version);
 
        // always write the chain hash
        serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
@@ -267,11 +278,97 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
        let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
        serialized_seen_timestamp.write(&mut prefixed_output).unwrap();
 
+       if serialization_version >= 2 { // serialize the most common node features
+               for mutated_node_id in serialization_details.node_mutations.keys() {
+                       // also index mutated nodes that do not appear in any channel announcement
+                       get_node_id_index(mutated_node_id.clone());
+               }
+
+               let default_feature_count = serialization_details.node_announcement_feature_defaults.len() as u8;
+               debug_assert!(default_feature_count <= config::NODE_DEFAULT_FEATURE_COUNT, "Default feature count cannot exceed maximum");
+               default_feature_count.write(&mut prefixed_output).unwrap();
+
+               for current_feature in &serialization_details.node_announcement_feature_defaults {
+                       current_feature.write(&mut prefixed_output).unwrap();
+               }
+       }
+
        let node_id_count = node_ids.len() as u32;
        node_id_count.write(&mut prefixed_output).unwrap();
 
+       let mut node_update_count = 0u32;
+       let mut node_feature_update_count = 0u32;
+       let mut node_address_update_count = 0u32;
+
        for current_node_id in node_ids {
-               current_node_id.write(&mut prefixed_output).unwrap();
+               let mut current_node_delta_serialization: Vec<u8> = Vec::new();
+               current_node_id.write(&mut current_node_delta_serialization).unwrap();
+
+               if serialization_version >= 2 {
+                       if let Some(node_delta) = serialization_details.node_mutations.get(&current_node_id) {
+                               /*
+                               Bitmap:
+                               7: expect extra data after the pubkey (a u16 for the count, and then that number of bytes)
+                               5-3: index of new features among default (1-6). If index is 7 (all 3 bits are set, it's
+                               outside the present default range). 0 means no feature changes.
+                               5-3: index of the new feature set among the defaults (1-6). If the index is 7 (all three
+                               bits set), the feature set is outside the present default range. 0 means no feature changes.
+                               1: used for all keys
+                               0: used for odd keys
+                               */
+
+                               if node_delta.has_address_set_changed {
+                                       node_address_update_count += 1;
+
+                                       let address_set = &node_delta.latest_details_after_seen.as_ref().unwrap().addresses;
+                                       let mut address_serialization = Vec::new();
+
+                                       // we don't know a priori how many addresses will serialize to <= 255 bytes each
+                                       let mut total_address_count = 0u8;
+
+                                       for address in address_set.iter() {
+                                               if total_address_count == u8::MAX {
+                                                       // don't serialize more than 255 addresses
+                                                       break;
+                                               }
+                                               if let Ok(serialized_length) = u8::try_from(address.serialized_length()) {
+                                                       total_address_count += 1;
+                                                       serialized_length.write(&mut address_serialization).unwrap();
+                                                       address.write(&mut address_serialization).unwrap();
+                                               };
+                                       }
+
+                                       if total_address_count > 0 {
+                                               // signal the presence of node addresses
+                                               current_node_delta_serialization[0] |= 1 << 2;
+                                               // serialize the actual addresses and count
+                                               total_address_count.write(&mut current_node_delta_serialization).unwrap();
+                                               current_node_delta_serialization.append(&mut address_serialization);
+                                       }
+                               }
+
+                               if node_delta.has_feature_set_changed {
+                                       node_feature_update_count += 1;
+
+                                       let latest_features = &node_delta.latest_details_after_seen.as_ref().unwrap().features;
+
+                                       // are these features among the most common ones?
+                                       if let Some(index) = serialization_details.node_announcement_feature_defaults.iter().position(|f| f == latest_features) {
+                                               // this feature set is among the 6 defaults
+                                               current_node_delta_serialization[0] |= ((index + 1) as u8) << 3;
+                                       } else {
+                                               current_node_delta_serialization[0] |= 0b_0011_1000; // 7 << 3
+                                               latest_features.write(&mut current_node_delta_serialization).unwrap();
+                                       }
+                               }
+
+                               if node_delta.has_address_set_changed || node_delta.has_feature_set_changed {
+                                       node_update_count += 1;
+                               }
+                       }
+               }
+
+               prefixed_output.append(&mut current_node_delta_serialization);
        }
 
        prefixed_output.append(&mut output);
@@ -282,7 +379,11 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
        SerializedResponse {
                data: prefixed_output,
                message_count,
-               announcement_count,
+               node_announcement_count: node_id_count,
+               node_update_count,
+               node_feature_update_count,
+               node_address_update_count,
+               channel_announcement_count: announcement_count,
                update_count,
                update_count_full,
                update_count_incremental,
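
In the v2 encoding above, the per-node flags are OR'd into the first byte of the serialized node ID, i.e. the compressed-pubkey prefix (0x02 or 0x03), which is why the bitmap comment describes bits 1 and 0 as "used for all keys" and "used for odd keys". A small worked example of how that byte composes, assuming a made-up prefix and default-feature index:

	fn example_node_flag_byte() -> u8 {
		// Compressed public keys start with 0x02 (even y) or 0x03 (odd y).
		let pubkey_prefix: u8 = 0x03;

		// Suppose this node's latest feature set matches the third serialized default
		// (index 2); the bitmap stores index + 1 in bits 5-3.
		let feature_default_index: u8 = 2;

		let mut byte = pubkey_prefix;
		byte |= (feature_default_index + 1) << 3; // 0b0001_1000
		byte |= 1 << 2; // the address set changed as well

		assert_eq!(byte, 0x1f);
		byte
	}
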
diff --git a/src/lookup.rs b/src/lookup.rs
index fc0385c8d46ac56a107b3591006bfaa915800317..751ddf228145595332a42f884f5fe6424ac2dd1f 100644
@@ -1,16 +1,17 @@
-use std::collections::{BTreeMap, HashSet};
+use std::collections::{BTreeMap, HashMap, HashSet};
 use std::io::Cursor;
 use std::ops::Deref;
 use std::sync::Arc;
 use std::time::{Instant, SystemTime, UNIX_EPOCH};
 
-use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
-use lightning::routing::gossip::NetworkGraph;
+use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
+use lightning::routing::gossip::{NetworkGraph, NodeId};
 use lightning::util::ser::Readable;
 use tokio_postgres::Client;
 
 use futures::StreamExt;
 use lightning::{log_debug, log_gossip, log_info};
+use lightning::ln::features::NodeFeatures;
 use lightning::util::logger::Logger;
 
 use crate::config;
@@ -19,6 +20,7 @@ use crate::serialization::MutatedProperties;
 /// The delta set needs to be a BTreeMap so the keys are sorted.
 /// That way, the scids in the response automatically grow monotonically
 pub(super) type DeltaSet = BTreeMap<u64, ChannelDelta>;
+pub(super) type NodeDeltaSet = HashMap<NodeId, NodeDelta>;
 
 pub(super) struct AnnouncementDelta {
        pub(super) seen: u32,
@@ -50,6 +52,31 @@ pub(super) struct ChannelDelta {
        pub(super) requires_reminder: bool,
 }
 
+pub(super) struct NodeDelta {
+       /// The most recently received, but new-to-the-client, node details
+       pub(super) latest_details_after_seen: Option<NodeDetails>,
+
+       /// Between last_details_before_seen and latest_details_after_seen, including any potential
+       /// intermediate updates that are not kept track of here, has the set of features this node
+       /// supports changed?
+       pub(super) has_feature_set_changed: bool,
+
+       /// Between last_details_before_seen and latest_details_after_seen, including any potential
+       /// intermediate updates that are not kept track of here, has the set of socket addresses this
+       /// node listens on changed?
+       pub(super) has_address_set_changed: bool,
+
+       /// The most recent node details that the client would have seen already
+       pub(super) last_details_before_seen: Option<NodeDetails>
+}
+
+pub(super) struct NodeDetails {
+       #[allow(unused)]
+       pub(super) seen: u32,
+       pub(super) features: NodeFeatures,
+       pub(super) addresses: HashSet<SocketAddress>
+}
+
 impl Default for ChannelDelta {
        fn default() -> Self {
                Self {
@@ -61,6 +88,17 @@ impl Default for ChannelDelta {
        }
 }
 
+impl Default for NodeDelta {
+       fn default() -> Self {
+               Self {
+                       latest_details_after_seen: None,
+                       has_feature_set_changed: false,
+                       has_address_set_changed: false,
+                       last_details_before_seen: None,
+               }
+       }
+}
+
 impl Default for DirectedUpdateDelta {
        fn default() -> Self {
                Self {
@@ -83,7 +121,7 @@ pub(super) async fn fetch_channel_announcements<L: Deref>(delta_set: &mut DeltaS
                log_info!(logger, "Retrieved read-only network graph copy");
                let channel_iterator = read_only_graph.channels().unordered_iter();
                channel_iterator
-                       .filter(|c| c.1.announcement_message.is_some())
+                       .filter(|c| c.1.announcement_message.is_some() && c.1.one_to_two.is_some() && c.1.two_to_one.is_some())
                        .map(|c| c.1.announcement_message.as_ref().unwrap().contents.short_channel_id as i64)
                        .collect::<Vec<_>>()
        };
@@ -365,7 +403,6 @@ pub(super) async fn fetch_channel_updates<L: Deref>(delta_set: &mut DeltaSet, cl
        let mut previous_scid = u64::MAX;
        let mut previously_seen_directions = (false, false);
 
-       // let mut previously_seen_directions = (false, false);
        let mut intermediate_update_count = 0;
        while let Some(row_res) = pinned_updates.next().await {
                let intermediate_update = row_res.unwrap();
@@ -437,6 +474,108 @@ pub(super) async fn fetch_channel_updates<L: Deref>(delta_set: &mut DeltaSet, cl
        log_info!(logger, "Processed intermediate rows ({}) (delta size: {}): {:?}", intermediate_update_count, delta_set.len(), start.elapsed());
 }
 
+pub(super) async fn fetch_node_updates<L: Deref>(client: &Client, last_sync_timestamp: u32, logger: L) -> NodeDeltaSet where L::Target: Logger {
+       let start = Instant::now();
+       let last_sync_timestamp_float = last_sync_timestamp as f64;
+
+       let mut delta_set = NodeDeltaSet::new();
+
+       // get the latest node updates prior to last_sync_timestamp
+       let reference_rows = client.query_raw("
+               SELECT DISTINCT ON (public_key) public_key, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen, announcement_signed
+               FROM node_announcements
+               WHERE seen < TO_TIMESTAMP($1)
+               ORDER BY public_key ASC, seen DESC
+               ", [last_sync_timestamp_float]).await.unwrap();
+       let mut pinned_rows = Box::pin(reference_rows);
+
+       log_info!(logger, "Fetched node announcement reference rows in {:?}", start.elapsed());
+
+       let mut reference_row_count = 0;
+
+       while let Some(row_res) = pinned_rows.next().await {
+               let current_reference = row_res.unwrap();
+
+               let seen = current_reference.get::<_, i64>("seen") as u32;
+               let blob: Vec<u8> = current_reference.get("announcement_signed");
+               let mut readable = Cursor::new(blob);
+               let unsigned_node_announcement = NodeAnnouncement::read(&mut readable).unwrap().contents;
+               let node_id = unsigned_node_announcement.node_id;
+
+               let current_node_delta = delta_set.entry(node_id).or_insert(NodeDelta::default());
+               (*current_node_delta).last_details_before_seen.get_or_insert_with(|| {
+                       let address_set: HashSet<SocketAddress> = unsigned_node_announcement.addresses.into_iter().collect();
+                       NodeDetails {
+                               seen,
+                               features: unsigned_node_announcement.features,
+                               addresses: address_set,
+                       }
+               });
+               log_gossip!(logger, "Node {} last update before seen: {} (seen at {})", node_id, unsigned_node_announcement.timestamp, seen);
+
+               reference_row_count += 1;
+       }
+
+
+       log_info!(logger, "Processed {} node announcement reference rows (delta size: {}) in {:?}",
+               reference_row_count, delta_set.len(), start.elapsed());
+
+       // get all the intermediate node updates
+       // (to calculate the set of mutated fields for snapshotting, where intermediate updates may
+       // have been omitted)
+       let intermediate_updates = client.query_raw("
+               SELECT announcement_signed, CAST(EXTRACT('epoch' from seen) AS BIGINT) AS seen
+               FROM node_announcements
+               WHERE seen >= TO_TIMESTAMP($1)
+               ORDER BY public_key ASC, timestamp DESC
+               ", [last_sync_timestamp_float]).await.unwrap();
+       let mut pinned_updates = Box::pin(intermediate_updates);
+       log_info!(logger, "Fetched intermediate node announcement rows in {:?}", start.elapsed());
+
+       let mut previous_node_id: Option<NodeId> = None;
+
+       let mut intermediate_update_count = 0;
+       while let Some(row_res) = pinned_updates.next().await {
+               let intermediate_update = row_res.unwrap();
+               intermediate_update_count += 1;
+
+               let current_seen_timestamp = intermediate_update.get::<_, i64>("seen") as u32;
+               let blob: Vec<u8> = intermediate_update.get("announcement_signed");
+               let mut readable = Cursor::new(blob);
+               let unsigned_node_announcement = NodeAnnouncement::read(&mut readable).unwrap().contents;
+
+               let node_id = unsigned_node_announcement.node_id;
+               let is_previously_processed_node_id = Some(node_id) == previous_node_id;
+
+               // get this node's address set
+               let current_node_delta = delta_set.entry(node_id).or_insert(NodeDelta::default());
+               let address_set: HashSet<SocketAddress> = unsigned_node_announcement.addresses.into_iter().collect();
+
+               // determine mutations
+               if let Some(last_seen_update) = current_node_delta.last_details_before_seen.as_ref() {
+                       if unsigned_node_announcement.features != last_seen_update.features {
+                               current_node_delta.has_feature_set_changed = true;
+                       }
+                       if address_set != last_seen_update.addresses {
+                               current_node_delta.has_address_set_changed = true;
+                       }
+               }
+
+               if !is_previously_processed_node_id {
+                       (*current_node_delta).latest_details_after_seen.get_or_insert(NodeDetails {
+                               seen: current_seen_timestamp,
+                               features: unsigned_node_announcement.features,
+                               addresses: address_set,
+                       });
+               }
+
+               previous_node_id = Some(node_id);
+       }
+       log_info!(logger, "Processed intermediate node announcement rows ({}) (delta size: {}): {:?}", intermediate_update_count, delta_set.len(), start.elapsed());
+
+       delta_set
+}
+
 pub(super) fn filter_delta_set<L: Deref>(delta_set: &mut DeltaSet, logger: L) where L::Target: Logger {
        let original_length = delta_set.len();
        let keys: Vec<u64> = delta_set.keys().cloned().collect();
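
The node delta computation above works in two passes: the DISTINCT ON (public_key) query pins the most recently seen announcement per node from before the last sync, and the second query walks everything seen since, flipping has_feature_set_changed / has_address_set_changed whenever a newer announcement differs from that baseline. A toy sketch of the resulting NodeDelta for a node whose addresses changed but whose features did not; all field values are illustrative:

	use std::collections::HashSet;
	use lightning::ln::features::NodeFeatures;
	use lightning::ln::msgs::SocketAddress;

	fn example_node_delta() -> NodeDelta {
		let features = NodeFeatures::empty();
		let old_addresses: HashSet<SocketAddress> =
			[SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 9735 }].into_iter().collect();
		let new_addresses: HashSet<SocketAddress> =
			[SocketAddress::TcpIpV4 { addr: [10, 0, 0, 1], port: 9735 }].into_iter().collect();

		NodeDelta {
			last_details_before_seen: Some(NodeDetails { seen: 1_700_000_000, features: features.clone(), addresses: old_addresses }),
			latest_details_after_seen: Some(NodeDetails { seen: 1_700_000_600, features, addresses: new_addresses }),
			has_feature_set_changed: false,
			has_address_set_changed: true,
		}
	}
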
diff --git a/src/persistence.rs b/src/persistence.rs
index a7cfb37caca8c17c9a21541f9e10d934d2c13477..04c6b9a0c4076cab725b90b5a4ec769718d183a0 100644
@@ -69,21 +69,20 @@ impl<L: Deref> GossipPersister<L> where L::Target: Logger {
                                panic!("db init error: {}", initialization_error);
                        }
 
-                       let initialization = client
-                               .execute(config::db_announcement_table_creation_query(), &[])
-                               .await;
-                       if let Err(initialization_error) = initialization {
-                               panic!("db init error: {}", initialization_error);
-                       }
+                       let table_creation_queries = [
+                               config::db_announcement_table_creation_query(),
+                               config::db_channel_update_table_creation_query(),
+                               config::db_node_announcement_table_creation_query()
+                       ];
 
-                       let initialization = client
-                               .execute(
-                                       config::db_channel_update_table_creation_query(),
-                                       &[],
-                               )
-                               .await;
-                       if let Err(initialization_error) = initialization {
-                               panic!("db init error: {}", initialization_error);
+                       for current_table_creation_query in table_creation_queries {
+                               let initialization = client
+                                       .execute(current_table_creation_query, &[])
+                                       .await;
+                               if let Err(initialization_error) = initialization {
+                                       panic!("db init error: {}", initialization_error);
+                               }
                        }
 
                        let initialization = client
@@ -133,6 +132,59 @@ impl<L: Deref> GossipPersister<L> where L::Target: Logger {
 
                        let connections_cache_ref = Arc::clone(&connections_cache);
                        match gossip_message {
+                               GossipMessage::NodeAnnouncement(announcement, seen_override) => {
+                                       let public_key_hex = announcement.contents.node_id.to_string();
+
+                                       let mut announcement_signed = Vec::new();
+                                       announcement.write(&mut announcement_signed).unwrap();
+
+                                       let features = announcement.contents.features.encode();
+                                       let timestamp = announcement.contents.timestamp as i64;
+
+                                       let mut serialized_addresses = Vec::new();
+                                       announcement.contents.addresses.write(&mut serialized_addresses).unwrap();
+
+                                       let _task = self.tokio_runtime.spawn(async move {
+                                               if cfg!(test) && seen_override.is_some() {
+                                                       tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+                                                               .execute("INSERT INTO node_announcements (\
+                                                               public_key, \
+                                                               features, \
+                                                               socket_addresses, \
+                                                               timestamp, \
+                                                               announcement_signed, \
+                                                               seen \
+                                                       ) VALUES ($1, $2, $3, $4, $5, TO_TIMESTAMP($6))", &[
+                                                                       &public_key_hex,
+                                                                       &features,
+                                                                       &serialized_addresses,
+                                                                       &timestamp,
+                                                                       &announcement_signed,
+                                                                       &(seen_override.unwrap() as f64)
+                                                               ])).await.unwrap().unwrap();
+                                               } else {
+                                                       tokio::time::timeout(POSTGRES_INSERT_TIMEOUT, client
+                                                               .execute("INSERT INTO node_announcements (\
+                                                               public_key, \
+                                                               features, \
+                                                               socket_addresses, \
+                                                               timestamp, \
+                                                               announcement_signed \
+                                                       ) VALUES ($1, $2, $3, $4, $5)", &[
+                                                                       &public_key_hex,
+                                                                       &features,
+                                                                       &serialized_addresses,
+                                                                       &timestamp,
+                                                                       &announcement_signed,
+                                                               ])).await.unwrap().unwrap();
+                                               }
+                                               let mut connections_set = connections_cache_ref.lock().await;
+                                               connections_set.push(client);
+                                               limiter_ref.add_permits(1);
+                                       });
+                                       #[cfg(test)]
+                                       tasks_spawned.push(_task);
+                               },
                                GossipMessage::ChannelAnnouncement(announcement, seen_override) => {
                                        let scid = announcement.contents.short_channel_id as i64;
 
diff --git a/src/serialization.rs b/src/serialization.rs
index b9306d8f54762aa66bee33cb92f515da0dbce687..5f11b275f6799f3f61baa88d6825645a9dde89cc 100644
@@ -4,16 +4,19 @@ use std::time::{SystemTime, UNIX_EPOCH};
 
 use bitcoin::Network;
 use bitcoin::blockdata::constants::ChainHash;
+use lightning::ln::features::NodeFeatures;
 use lightning::ln::msgs::{UnsignedChannelAnnouncement, UnsignedChannelUpdate};
 use lightning::util::ser::{BigSize, Writeable};
 use crate::config;
 
-use crate::lookup::{DeltaSet, DirectedUpdateDelta};
+use crate::lookup::{DeltaSet, DirectedUpdateDelta, NodeDeltaSet};
 
 pub(super) struct SerializationSet {
        pub(super) announcements: Vec<UnsignedChannelAnnouncement>,
        pub(super) updates: Vec<UpdateSerialization>,
        pub(super) full_update_defaults: DefaultUpdateValues,
+       pub(super) node_announcement_feature_defaults: Vec<NodeFeatures>,
+       pub(super) node_mutations: NodeDeltaSet,
        pub(super) latest_seen: u32,
        pub(super) chain_hash: ChainHash,
 }
@@ -104,11 +107,13 @@ struct FullUpdateValueHistograms {
        htlc_maximum_msat: HashMap<u64, usize>,
 }
 
-pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32) -> SerializationSet {
+pub(super) fn serialize_delta_set(channel_delta_set: DeltaSet, node_delta_set: NodeDeltaSet, last_sync_timestamp: u32) -> SerializationSet {
        let mut serialization_set = SerializationSet {
                announcements: vec![],
                updates: vec![],
                full_update_defaults: Default::default(),
+               node_announcement_feature_defaults: vec![],
+               node_mutations: Default::default(),
                chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
                latest_seen: 0,
        };
@@ -134,7 +139,7 @@ pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32)
        // if the previous seen update happened more than 6 days ago, the client may have pruned it, and an incremental update wouldn't work
        let non_incremental_previous_update_threshold_timestamp = SystemTime::now().checked_sub(config::CHANNEL_REMINDER_AGE).unwrap().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;
 
-       for (scid, channel_delta) in delta_set.into_iter() {
+       for (scid, channel_delta) in channel_delta_set.into_iter() {
 
                // any announcement chain hash is gonna be the same value. Just set it from the first one.
                let channel_announcement_delta = channel_delta.announcement.as_ref().unwrap();
@@ -214,6 +219,22 @@ pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32)
        };
 
        serialization_set.full_update_defaults = default_update_values;
+
+       serialization_set.node_mutations = node_delta_set.into_iter().filter(|(_id, delta)| {
+               // either something changed, or this node is new
+               delta.has_feature_set_changed || delta.has_address_set_changed || delta.last_details_before_seen.is_none()
+       }).collect();
+
+       let mut node_feature_histogram: HashMap<&NodeFeatures, usize> = Default::default();
+       for (_id, delta) in serialization_set.node_mutations.iter() {
+               if delta.has_feature_set_changed || delta.last_details_before_seen.is_none() {
+                       if let Some(latest_details) = delta.latest_details_after_seen.as_ref() {
+                               *node_feature_histogram.entry(&latest_details.features).or_insert(0) += 1;
+                       };
+               }
+       }
+       serialization_set.node_announcement_feature_defaults = find_leading_histogram_entries(node_feature_histogram, config::NODE_DEFAULT_FEATURE_COUNT as usize);
+
        serialization_set
 }
 
@@ -327,3 +348,9 @@ pub(super) fn find_most_common_histogram_entry_with_default<T: Copy>(histogram:
        // though for htlc maximum msat it could be a u64::max
        default
 }
+
+pub(super) fn find_leading_histogram_entries(histogram: HashMap<&NodeFeatures, usize>, count: usize) -> Vec<NodeFeatures> {
+       let mut entry_counts: Vec<_> = histogram.iter().filter(|&(_, &count)| count > 1).collect();
+       entry_counts.sort_by(|a, b| b.1.cmp(&a.1));
+       entry_counts.into_iter().take(count).map(|(&features, _count)| features.clone()).collect()
+}
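
A short usage sketch for find_leading_histogram_entries: feature sets seen only once are dropped by the count > 1 filter, the remainder is sorted by frequency, and at most `count` of them survive as the v2 defaults. The feature bytes below are arbitrary:

	use std::collections::HashMap;
	use lightning::ln::features::NodeFeatures;

	fn example_feature_defaults() -> Vec<NodeFeatures> {
		let common = NodeFeatures::from_be_bytes(vec![23, 48]);
		let rare = NodeFeatures::from_be_bytes(vec![22, 49]);

		let mut histogram: HashMap<&NodeFeatures, usize> = HashMap::new();
		histogram.insert(&common, 42); // frequent enough to become a default
		histogram.insert(&rare, 1);    // seen once, removed by the count > 1 filter

		// With NODE_DEFAULT_FEATURE_COUNT = 6, only `common` remains here.
		find_leading_histogram_entries(histogram, config::NODE_DEFAULT_FEATURE_COUNT as usize)
	}
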
diff --git a/src/snapshot.rs b/src/snapshot.rs
index c5154c791f9e072a67f3ee52ea2babc89a6abaf4..896783efd3caf7c632eb2fa07f003216c6f838d3 100644
@@ -92,14 +92,20 @@ impl<L: Deref + Clone> Snapshotter<L> where L::Target: Logger {
                // channel updates
 
                // purge and recreate the pending directories
-               if fs::metadata(&pending_snapshot_directory).is_ok() {
-                       fs::remove_dir_all(&pending_snapshot_directory).expect("Failed to remove pending snapshot directory.");
-               }
-               if fs::metadata(&pending_symlink_directory).is_ok() {
-                       fs::remove_dir_all(&pending_symlink_directory).expect("Failed to remove pending symlink directory.");
+               let suffixes = [("", ""), ("/v2", "../")];
+               for (suffix, _) in suffixes {
+                       let versioned_snapshot_directory = format!("{}{}", pending_snapshot_directory, suffix);
+                       let versioned_symlink_directory = format!("{}{}", pending_symlink_directory, suffix);
+
+                       if fs::metadata(&versioned_snapshot_directory).is_ok() {
+                               fs::remove_dir_all(&versioned_snapshot_directory).expect("Failed to remove pending snapshot directory.");
+                       }
+                       if fs::metadata(&versioned_symlink_directory).is_ok() {
+                               fs::remove_dir_all(&versioned_symlink_directory).expect("Failed to remove pending symlink directory.");
+                       }
+                       fs::create_dir_all(&versioned_snapshot_directory).expect("Failed to create pending snapshot directory");
+                       fs::create_dir_all(&versioned_symlink_directory).expect("Failed to create pending symlink directory");
                }
-               fs::create_dir_all(&pending_snapshot_directory).expect("Failed to create pending snapshot directory");
-               fs::create_dir_all(&pending_symlink_directory).expect("Failed to create pending symlink directory");
 
                let mut snapshot_sync_timestamps: Vec<(u64, u64)> = Vec::new();
                for current_scope in snapshot_scopes {
@@ -114,13 +120,17 @@ impl<L: Deref + Clone> Snapshotter<L> where L::Target: Logger {
                        {
                                log_info!(self.logger, "Calculating {}-second snapshot", current_scope);
                                // calculate the snapshot
-                               let snapshot = super::serialize_delta(network_graph_clone, current_last_sync_timestamp.clone() as u32, Some(reference_timestamp), self.logger.clone()).await;
+                               let delta = super::calculate_delta(network_graph_clone.clone(), current_last_sync_timestamp.clone() as u32, Some(reference_timestamp), self.logger.clone()).await;
+                               let snapshot_v1 = super::serialize_delta(&delta, 1, self.logger.clone());
+                               let snapshot_v2 = super::serialize_delta(&delta, 2, self.logger.clone());
 
                                // persist the snapshot and update the symlink
                                let snapshot_filename = format!("snapshot__calculated-at:{}__range:{}-scope__previous-sync:{}.lngossip", reference_timestamp, current_scope, current_last_sync_timestamp);
-                               let snapshot_path = format!("{}/{}", pending_snapshot_directory, snapshot_filename);
-                               log_info!(self.logger, "Persisting {}-second snapshot: {} ({} messages, {} announcements, {} updates ({} full, {} incremental))", current_scope, snapshot_filename, snapshot.message_count, snapshot.announcement_count, snapshot.update_count, snapshot.update_count_full, snapshot.update_count_incremental);
-                               fs::write(&snapshot_path, snapshot.data).unwrap();
+                               let snapshot_path_v1 = format!("{}/{}", pending_snapshot_directory, snapshot_filename);
+                               let snapshot_path_v2 = format!("{}/v2/{}", pending_snapshot_directory, snapshot_filename);
+                               log_info!(self.logger, "Persisting {}-second snapshot: {} ({} messages, {} announcements, {} updates ({} full, {} incremental))", current_scope, snapshot_filename, snapshot_v1.message_count, snapshot_v1.channel_announcement_count, snapshot_v1.update_count, snapshot_v1.update_count_full, snapshot_v1.update_count_incremental);
+                               fs::write(&snapshot_path_v1, snapshot_v1.data).unwrap();
+                               fs::write(&snapshot_path_v2, snapshot_v2.data).unwrap();
                                snapshot_filenames_by_scope.insert(current_scope.clone(), snapshot_filename);
                        }
                }
@@ -175,19 +185,21 @@ impl<L: Deref + Clone> Snapshotter<L> where L::Target: Logger {
                        };
                        log_info!(self.logger, "i: {}, referenced scope: {}", i, referenced_scope);
 
-                       let snapshot_filename = snapshot_filenames_by_scope.get(&referenced_scope).unwrap();
-                       let relative_snapshot_path = format!("{}/{}", relative_symlink_to_snapshot_path, snapshot_filename);
+                       for (suffix, path_to_root) in suffixes {
+                               let snapshot_filename = snapshot_filenames_by_scope.get(&referenced_scope).unwrap();
+                               let relative_snapshot_path = format!("{}{}{}/{}", path_to_root, relative_symlink_to_snapshot_path, suffix, snapshot_filename);
 
-                       let canonical_last_sync_timestamp = if i == 0 {
-                               // special-case 0 to always refer to a full/initial sync
-                               0
-                       } else {
-                               reference_timestamp.saturating_sub(granularity_interval.saturating_mul(i))
-                       };
-                       let symlink_path = format!("{}/{}.bin", pending_symlink_directory, canonical_last_sync_timestamp);
+                               let canonical_last_sync_timestamp = if i == 0 {
+                                       // special-case 0 to always refer to a full/initial sync
+                                       0
+                               } else {
+                                       reference_timestamp.saturating_sub(granularity_interval.saturating_mul(i))
+                               };
+                               let symlink_path = format!("{}{}/{}.bin", pending_symlink_directory, suffix, canonical_last_sync_timestamp);
 
-                       log_info!(self.logger, "Symlinking: {} -> {} ({} -> {}", i, referenced_scope, symlink_path, relative_snapshot_path);
-                       symlink(&relative_snapshot_path, &symlink_path).unwrap();
+                               log_info!(self.logger, "Symlinking: {} -> {} ({} -> {})", i, referenced_scope, symlink_path, relative_snapshot_path);
+                               symlink(&relative_snapshot_path, &symlink_path).unwrap();
+                       }
                }
 
                let update_time_path = format!("{}/update_time.txt", pending_symlink_directory);
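
The v2 symlink layout above is what the commit title refers to: v2 symlinks live one directory deeper (under .../v2/), so their relative targets need an extra "../" hop back to the snapshot root before descending into the v2 snapshot directory. A sketch of the resulting layout, reusing the format strings from the code above with illustrative directory names:

	fn example_symlink_layout() {
		// Illustrative values; the real paths come from the snapshotter's configuration.
		let pending_symlink_directory = "res/symlinks_pending";
		let relative_symlink_to_snapshot_path = "../snapshots";
		let snapshot_filename = "snapshot__calculated-at:0__range:86400-scope__previous-sync:0.lngossip";
		let canonical_last_sync_timestamp = 0u64;

		for (suffix, path_to_root) in [("", ""), ("/v2", "../")] {
			let symlink_path = format!("{}{}/{}.bin", pending_symlink_directory, suffix, canonical_last_sync_timestamp);
			let relative_snapshot_path = format!("{}{}{}/{}", path_to_root, relative_symlink_to_snapshot_path, suffix, snapshot_filename);
			println!("{} -> {}", symlink_path, relative_snapshot_path);
			// res/symlinks_pending/0.bin    -> ../snapshots/<snapshot_filename>
			// res/symlinks_pending/v2/0.bin -> ../../snapshots/v2/<snapshot_filename>
		}
	}
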
diff --git a/src/tests/mod.rs b/src/tests/mod.rs
index e668f0b473409193872a12910720be284e2abe94..bf5cd4a8b0e8c3519f7956c26ca1e8be1eac4435 100644
@@ -11,12 +11,12 @@ use bitcoin::secp256k1::{Secp256k1, SecretKey};
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use hex_conservative::DisplayHex;
-use lightning::ln::features::ChannelFeatures;
-use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
-use lightning::routing::gossip::{NetworkGraph, NodeId};
+use lightning::ln::features::{ChannelFeatures, NodeFeatures};
+use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
+use lightning::routing::gossip::{NetworkGraph, NodeAlias, NodeId};
 use lightning::util::ser::Writeable;
 use lightning_rapid_gossip_sync::RapidGossipSync;
-use crate::{config, serialize_delta};
+use crate::{calculate_delta, config, serialize_delta};
 use crate::persistence::GossipPersister;
 use crate::snapshot::Snapshotter;
 use crate::types::{GossipMessage, tests::TestLogger};
@@ -47,7 +47,35 @@ pub(crate) fn db_test_schema() -> String {
        })
 }
 
-fn generate_announcement(short_channel_id: u64) -> ChannelAnnouncement {
+fn generate_node_announcement(private_key: Option<SecretKey>) -> NodeAnnouncement {
+       let secp_context = Secp256k1::new();
+
+       let random_private_key = private_key.unwrap_or(SecretKey::from_slice(&[1; 32]).unwrap());
+       let random_public_key = random_private_key.public_key(&secp_context);
+       let node_id = NodeId::from_pubkey(&random_public_key);
+
+       let announcement = UnsignedNodeAnnouncement {
+               features: NodeFeatures::empty(),
+               timestamp: 0,
+               node_id,
+               rgb: [0, 128, 255],
+               alias: NodeAlias([0; 32]),
+               addresses: vec![],
+               excess_data: vec![],
+               excess_address_data: vec![],
+       };
+
+       let msg_hash = bitcoin::secp256k1::Message::from_slice(&Sha256dHash::hash(&announcement.encode()[..])[..]).unwrap();
+       let signature = secp_context.sign_ecdsa(&msg_hash, &random_private_key);
+
+       NodeAnnouncement {
+               signature,
+               contents: announcement,
+       }
+}
+
+
+fn generate_channel_announcement(short_channel_id: u64) -> ChannelAnnouncement {
        let secp_context = Secp256k1::new();
 
        let random_private_key_1 = SecretKey::from_slice(&[1; 32]).unwrap();
@@ -205,7 +233,7 @@ async fn test_trivial_setup() {
        println!("timestamp: {}", timestamp);
 
        { // seed the db
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                let update_1 = generate_update(short_channel_id, false, timestamp, 0, 0, 0, 5, 0);
                let update_2 = generate_update(short_channel_id, true, timestamp, 0, 0, 0, 10, 0);
 
@@ -220,7 +248,8 @@ async fn test_trivial_setup() {
                persister.persist_gossip().await;
        }
 
-       let serialization = serialize_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+       let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+       let serialization = serialize_delta(&delta, 1, logger.clone());
        logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
        clean_test_db().await;
 
@@ -228,7 +257,7 @@ async fn test_trivial_setup() {
 
        assert_eq!(channel_count, 1);
        assert_eq!(serialization.message_count, 3);
-       assert_eq!(serialization.announcement_count, 1);
+       assert_eq!(serialization.channel_announcement_count, 1);
        assert_eq!(serialization.update_count, 2);
 
        let client_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
@@ -265,6 +294,126 @@ async fn test_trivial_setup() {
        }).await.unwrap();
 }
 
+#[tokio::test]
+async fn test_node_announcement_persistence() {
+       let _sanitizer = SchemaSanitizer::new();
+       let logger = Arc::new(TestLogger::new());
+       let network_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
+       let network_graph_arc = Arc::new(network_graph);
+       let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
+
+       { // seed the db
+               let mut announcement = generate_node_announcement(None);
+               receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), None)).await.unwrap();
+               receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), Some(12345))).await.unwrap();
+
+               {
+                       // modify announcement to contain a bunch of addresses
+                       announcement.contents.addresses.push(SocketAddress::Hostname {
+                               hostname: "google.com".to_string().try_into().unwrap(),
+                               port: 443,
+                       });
+                       announcement.contents.addresses.push(SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 9635 });
+                       announcement.contents.addresses.push(SocketAddress::TcpIpV6 { addr: [1; 16], port: 1337 });
+                       announcement.contents.addresses.push(SocketAddress::OnionV2([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]));
+                       announcement.contents.addresses.push(SocketAddress::OnionV3 {
+                               ed25519_pubkey: [1; 32],
+                               checksum: 2,
+                               version: 3,
+                               port: 4,
+                       });
+               }
+               receiver.send(GossipMessage::NodeAnnouncement(announcement, Some(12345))).await.unwrap();
+
+               drop(receiver);
+               persister.persist_gossip().await;
+
+               tokio::task::spawn_blocking(move || {
+                       drop(persister);
+               }).await.unwrap();
+       }
+       clean_test_db().await;
+}
+
+#[tokio::test]
+async fn test_node_announcement_delta_detection() {
+       let _sanitizer = SchemaSanitizer::new();
+       let logger = Arc::new(TestLogger::new());
+       let network_graph = NetworkGraph::new(Network::Bitcoin, logger.clone());
+       let network_graph_arc = Arc::new(network_graph);
+       let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
+
+       let timestamp = current_time() - 10;
+
+       { // seed the db
+               let mut announcement = generate_node_announcement(None);
+               receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), Some(timestamp - 10))).await.unwrap();
+               receiver.send(GossipMessage::NodeAnnouncement(announcement.clone(), Some(timestamp - 8))).await.unwrap();
+
+               {
+                       let mut current_announcement = generate_node_announcement(Some(SecretKey::from_slice(&[2; 32]).unwrap()));
+                       current_announcement.contents.features = NodeFeatures::from_be_bytes(vec![23, 48]);
+                       receiver.send(GossipMessage::NodeAnnouncement(current_announcement, Some(timestamp))).await.unwrap();
+               }
+
+               {
+                       let mut current_announcement = generate_node_announcement(Some(SecretKey::from_slice(&[3; 32]).unwrap()));
+                       current_announcement.contents.features = NodeFeatures::from_be_bytes(vec![22, 49]);
+                       receiver.send(GossipMessage::NodeAnnouncement(current_announcement, Some(timestamp))).await.unwrap();
+               }
+
+               {
+                       // modify announcement to contain a bunch of addresses
+                       announcement.contents.addresses.push(SocketAddress::Hostname {
+                               hostname: "google.com".to_string().try_into().unwrap(),
+                               port: 443,
+                       });
+                       announcement.contents.features = NodeFeatures::from_be_bytes(vec![23, 48]);
+                       announcement.contents.addresses.push(SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 9635 });
+                       announcement.contents.addresses.push(SocketAddress::TcpIpV6 { addr: [1; 16], port: 1337 });
+                       announcement.contents.addresses.push(SocketAddress::OnionV2([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]));
+                       announcement.contents.addresses.push(SocketAddress::OnionV3 {
+                               ed25519_pubkey: [1; 32],
+                               checksum: 2,
+                               version: 3,
+                               port: 4,
+                       });
+               }
+               receiver.send(GossipMessage::NodeAnnouncement(announcement, Some(timestamp))).await.unwrap();
+
+               { // necessary for the node announcements to be considered relevant
+                       let announcement = generate_channel_announcement(1);
+                       let update_1 = generate_update(1, false, timestamp, 0, 0, 0, 6, 0);
+                       let update_2 = generate_update(1, true, timestamp, 0, 0, 0, 6, 0);
+
+                       network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
+                       network_graph_arc.update_channel_unsigned(&update_1.contents).unwrap();
+                       network_graph_arc.update_channel_unsigned(&update_2.contents).unwrap();
+
+                       receiver.send(GossipMessage::ChannelAnnouncement(announcement, Some(timestamp))).await.unwrap();
+                       receiver.send(GossipMessage::ChannelUpdate(update_1, Some(timestamp))).await.unwrap();
+                       receiver.send(GossipMessage::ChannelUpdate(update_2, Some(timestamp))).await.unwrap();
+               }
+
+               drop(receiver);
+               persister.persist_gossip().await;
+
+               tokio::task::spawn_blocking(move || {
+                       drop(persister);
+               }).await.unwrap();
+       }
+
+       let delta = calculate_delta(network_graph_arc.clone(), timestamp - 5, None, logger.clone()).await;
+       let serialization = serialize_delta(&delta, 2, logger.clone());
+       clean_test_db().await;
+
+       assert_eq!(serialization.message_count, 3);
+       assert_eq!(serialization.node_announcement_count, 3);
+       assert_eq!(serialization.node_update_count, 1);
+       assert_eq!(serialization.node_feature_update_count, 1);
+       assert_eq!(serialization.node_address_update_count, 1);
+}
+
 /// If a channel has only seen updates in one direction, it should not be announced
 #[tokio::test]
 async fn test_unidirectional_intermediate_update_consideration() {
@@ -280,7 +429,7 @@ async fn test_unidirectional_intermediate_update_consideration() {
        println!("timestamp: {}", timestamp);
 
        { // seed the db
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                let update_1 = generate_update(short_channel_id, false, timestamp, 0, 0, 0, 6, 0);
                let update_2 = generate_update(short_channel_id, true, timestamp + 1, 0, 0, 0, 3, 0);
                let update_3 = generate_update(short_channel_id, true, timestamp + 2, 0, 0, 0, 4, 0);
@@ -305,14 +454,15 @@ async fn test_unidirectional_intermediate_update_consideration() {
        let client_graph_arc = Arc::new(client_graph);
        let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
 
-       let serialization = serialize_delta(network_graph_arc.clone(), timestamp + 1, None, logger.clone()).await;
+       let delta = calculate_delta(network_graph_arc.clone(), timestamp + 1, None, logger.clone()).await;
+       let serialization = serialize_delta(&delta, 1, logger.clone());
 
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 1 update rows of the first update in a new direction", 1);
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed 1 reference rows", 1);
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed intermediate rows (2)", 1);
 
        assert_eq!(serialization.message_count, 3);
-       assert_eq!(serialization.announcement_count, 1);
+       assert_eq!(serialization.channel_announcement_count, 1);
        assert_eq!(serialization.update_count, 2);
        assert_eq!(serialization.update_count_full, 2);
        assert_eq!(serialization.update_count_incremental, 0);
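Across these hunks the former single call `serialize_delta(network_graph, timestamp, …)` is split into `calculate_delta(…)` followed by `serialize_delta(&delta, version, …)`. A minimal usage sketch, assuming the new integer argument selects the serialization format version (the node-announcement test above passes 2, the channel-only tests pass 1), so one computed delta can be rendered for more than one client version:

	// `last_sync_timestamp` is a stand-in for whatever cutoff the caller computed.
	let delta = calculate_delta(network_graph_arc.clone(), last_sync_timestamp, None, logger.clone()).await;
	let v1_serialization = serialize_delta(&delta, 1, logger.clone());
	let v2_serialization = serialize_delta(&delta, 2, logger.clone());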
@@ -348,7 +498,7 @@ async fn test_bidirectional_intermediate_update_consideration() {
        println!("timestamp: {}", timestamp);
 
        { // seed the db
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                let update_1 = generate_update(short_channel_id, false, timestamp, 0, 0, 0, 5, 0);
                let update_2 = generate_update(short_channel_id, false, timestamp + 1, 0, 0, 0, 4, 0);
                let update_3 = generate_update(short_channel_id, false, timestamp + 2, 0, 0, 0, 3, 0);
@@ -372,14 +522,15 @@ async fn test_bidirectional_intermediate_update_consideration() {
        let channel_count = network_graph_arc.read_only().channels().len();
        assert_eq!(channel_count, 1);
 
-       let serialization = serialize_delta(network_graph_arc.clone(), timestamp + 1, None, logger.clone()).await;
+       let delta = calculate_delta(network_graph_arc.clone(), timestamp + 1, None, logger.clone()).await;
+       let serialization = serialize_delta(&delta, 1, logger.clone());
 
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 0 update rows of the first update in a new direction", 1);
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed 2 reference rows", 1);
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed intermediate rows (2)", 1);
 
        assert_eq!(serialization.message_count, 1);
-       assert_eq!(serialization.announcement_count, 0);
+       assert_eq!(serialization.channel_announcement_count, 0);
        assert_eq!(serialization.update_count, 1);
        assert_eq!(serialization.update_count_full, 0);
        assert_eq!(serialization.update_count_incremental, 1);
@@ -407,7 +558,7 @@ async fn test_channel_reminders() {
        { // seed the db
                { // unupdated channel
                        let short_channel_id = 1;
-                       let announcement = generate_announcement(short_channel_id);
+                       let announcement = generate_channel_announcement(short_channel_id);
                        let update_1 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 1, 0, 0, 0, 5, 0);
                        let update_2 = generate_update(short_channel_id, true, timestamp - channel_reminder_delta - 1, 0, 0, 0, 3, 0);
 
@@ -421,7 +572,7 @@ async fn test_channel_reminders() {
                }
                { // unmodified but updated channel
                        let short_channel_id = 2;
-                       let announcement = generate_announcement(short_channel_id);
+                       let announcement = generate_channel_announcement(short_channel_id);
                        let update_1 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 10, 0, 0, 0, 5, 0);
                        // in the false direction, we have one update that's different prior
                        let update_2 = generate_update(short_channel_id, false, timestamp - channel_reminder_delta - 5, 0, 1, 0, 5, 0);
@@ -455,7 +606,8 @@ async fn test_channel_reminders() {
        let channel_count = network_graph_arc.read_only().channels().len();
        assert_eq!(channel_count, 2);
 
-       let serialization = serialize_delta(network_graph_arc.clone(), timestamp - channel_reminder_delta + 15, None, logger.clone()).await;
+       let delta = calculate_delta(network_graph_arc.clone(), timestamp - channel_reminder_delta + 15, None, logger.clone()).await;
+       let serialization = serialize_delta(&delta, 1, logger.clone());
 
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 0 update rows of the first update in a new direction", 1);
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Fetched 4 update rows of the latest update in the less recently updated direction", 1);
@@ -463,7 +615,7 @@ async fn test_channel_reminders() {
        logger.assert_log_contains("rapid_gossip_sync_server::lookup", "Processed intermediate rows (2)", 1);
 
        assert_eq!(serialization.message_count, 4);
-       assert_eq!(serialization.announcement_count, 0);
+       assert_eq!(serialization.channel_announcement_count, 0);
        assert_eq!(serialization.update_count, 4);
        assert_eq!(serialization.update_count_full, 0);
        assert_eq!(serialization.update_count_incremental, 4);
@@ -488,7 +640,7 @@ async fn test_full_snapshot_recency() {
 
        { // seed the db
                let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
 
@@ -524,14 +676,15 @@ async fn test_full_snapshot_recency() {
        let client_graph_arc = Arc::new(client_graph);
 
        { // sync after initial seed
-               let serialization = serialize_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let serialization = serialize_delta(&delta, 1, logger.clone());
                logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
 
                let channel_count = network_graph_arc.read_only().channels().len();
 
                assert_eq!(channel_count, 1);
                assert_eq!(serialization.message_count, 3);
-               assert_eq!(serialization.announcement_count, 1);
+               assert_eq!(serialization.channel_announcement_count, 1);
                assert_eq!(serialization.update_count, 2);
 
                let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
@@ -568,7 +721,7 @@ async fn test_full_snapshot_recency_with_wrong_seen_order() {
 
        { // seed the db
                let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
 
@@ -604,14 +757,15 @@ async fn test_full_snapshot_recency_with_wrong_seen_order() {
        let client_graph_arc = Arc::new(client_graph);
 
        { // sync after initial seed
-               let serialization = serialize_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let serialization = serialize_delta(&delta, 1, logger.clone());
                logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
 
                let channel_count = network_graph_arc.read_only().channels().len();
 
                assert_eq!(channel_count, 1);
                assert_eq!(serialization.message_count, 3);
-               assert_eq!(serialization.announcement_count, 1);
+               assert_eq!(serialization.channel_announcement_count, 1);
                assert_eq!(serialization.update_count, 2);
 
                let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
@@ -648,7 +802,7 @@ async fn test_full_snapshot_recency_with_wrong_propagation_order() {
 
        { // seed the db
                let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
 
@@ -683,14 +837,15 @@ async fn test_full_snapshot_recency_with_wrong_propagation_order() {
        let client_graph_arc = Arc::new(client_graph);
 
        { // sync after initial seed
-               let serialization = serialize_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let serialization = serialize_delta(&delta, 1, logger.clone());
                logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
 
                let channel_count = network_graph_arc.read_only().channels().len();
 
                assert_eq!(channel_count, 1);
                assert_eq!(serialization.message_count, 3);
-               assert_eq!(serialization.announcement_count, 1);
+               assert_eq!(serialization.channel_announcement_count, 1);
                assert_eq!(serialization.update_count, 2);
 
                let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
@@ -730,7 +885,7 @@ async fn test_full_snapshot_mutiny_scenario() {
 
        { // seed the db
                let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
 
@@ -816,14 +971,15 @@ async fn test_full_snapshot_mutiny_scenario() {
        let client_graph_arc = Arc::new(client_graph);
 
        { // sync after initial seed
-               let serialization = serialize_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let serialization = serialize_delta(&delta, 1, logger.clone());
                logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 1", 1);
 
                let channel_count = network_graph_arc.read_only().channels().len();
 
                assert_eq!(channel_count, 1);
                assert_eq!(serialization.message_count, 3);
-               assert_eq!(serialization.announcement_count, 1);
+               assert_eq!(serialization.channel_announcement_count, 1);
                assert_eq!(serialization.update_count, 2);
 
                let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
@@ -867,13 +1023,13 @@ async fn test_full_snapshot_interlaced_channel_timestamps() {
                let secondary_channel_id = main_channel_id + 1;
 
                { // main channel
-                       let announcement = generate_announcement(main_channel_id);
+                       let announcement = generate_channel_announcement(main_channel_id);
                        network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                        receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
                }
 
                { // secondary channel
-                       let announcement = generate_announcement(secondary_channel_id);
+                       let announcement = generate_channel_announcement(secondary_channel_id);
                        network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                        receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
                }
@@ -929,14 +1085,15 @@ async fn test_full_snapshot_interlaced_channel_timestamps() {
        let client_graph_arc = Arc::new(client_graph);
 
        { // sync after initial seed
-               let serialization = serialize_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let delta = calculate_delta(network_graph_arc.clone(), 0, None, logger.clone()).await;
+               let serialization = serialize_delta(&delta, 1, logger.clone());
                logger.assert_log_contains("rapid_gossip_sync_server", "announcement channel count: 2", 1);
 
                let channel_count = network_graph_arc.read_only().channels().len();
 
                assert_eq!(channel_count, 2);
                assert_eq!(serialization.message_count, 6);
-               assert_eq!(serialization.announcement_count, 2);
+               assert_eq!(serialization.channel_announcement_count, 2);
                assert_eq!(serialization.update_count, 4);
 
                let rgs = RapidGossipSync::new(client_graph_arc.clone(), logger.clone());
@@ -975,7 +1132,7 @@ async fn test_full_snapshot_persistence() {
 
        { // seed the db
                let (mut persister, receiver) = GossipPersister::new(network_graph_arc.clone(), logger.clone());
-               let announcement = generate_announcement(short_channel_id);
+               let announcement = generate_channel_announcement(short_channel_id);
                network_graph_arc.update_channel_from_announcement_no_lookup(&announcement).unwrap();
                receiver.send(GossipMessage::ChannelAnnouncement(announcement, None)).await.unwrap();
 
index 0c6c9b2533a7404c556e4e3a15bfa673bd178bc5..f38a3760b316897b78729d4e96aa865c122b8969 100644 (file)
@@ -1,7 +1,7 @@
 use std::sync::Arc;
 
 use lightning::sign::KeysManager;
-use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate};
+use lightning::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement};
 use lightning::ln::peer_handler::{ErroringMessageHandler, IgnoringMessageHandler, PeerManager};
 use lightning::util::logger::{Logger, Record};
 use crate::config;
@@ -14,6 +14,7 @@ pub(crate) type GossipPeerManager<L> = Arc<PeerManager<lightning_net_tokio::Sock
 
 #[derive(Debug)]
 pub(crate) enum GossipMessage {
+       NodeAnnouncement(NodeAnnouncement, Option<u32>),
        // the second element is an optional override for the seen value
        ChannelAnnouncement(ChannelAnnouncement, Option<u32>),
        ChannelUpdate(ChannelUpdate, Option<u32>),
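The tests above exercise the new variant through the persister's queue; a short sketch of that usage, assuming the `Option<u32>` is a UNIX-seconds override for when the message was first seen, with `None` meaning "seen now". Here `older_announcement` and `fresh_announcement` are placeholder `NodeAnnouncement` values, `current_time()` is the test module's helper, and `receiver`, despite its name, is the sending half returned by `GossipPersister::new()`:

	let backdated_seen = current_time() - 60 * 60;
	receiver.send(GossipMessage::NodeAnnouncement(older_announcement, Some(backdated_seen))).await.unwrap();
	receiver.send(GossipMessage::NodeAnnouncement(fresh_announcement, None)).await.unwrap();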