#![deny(broken_intra_doc_links)]
#![deny(private_intra_doc_links)]
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_variables)]
#![deny(unused_imports)]
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::BufReader;
use std::ops::Deref;
use std::sync::Arc;

use bitcoin::blockdata::constants::ChainHash;
use lightning::log_info;
use lightning::routing::gossip::{NetworkGraph, NodeId};
use lightning::util::logger::Logger;
use lightning::util::ser::{ReadableArgs, Writeable};
use tokio::sync::mpsc;
use tokio_postgres::{Client, NoTls};

use crate::config::SYMLINK_GRANULARITY_INTERVAL;
use crate::lookup::DeltaSet;
use crate::persistence::GossipPersister;
use crate::serialization::UpdateSerialization;
use crate::snapshot::Snapshotter;
use crate::types::RGSSLogger;

mod config;
mod lookup;
mod persistence;
mod serialization;
mod snapshot;
mod tracking;
mod types;

#[cfg(test)]
mod tests;
/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
/// sync formats arise in the future.
///
/// The fourth byte is the protocol version in case our format gets updated.
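///
/// (The first three bytes are the ASCII characters "LDK": 76, 68, 75.)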
const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
	network_graph: Arc<NetworkGraph<L>>,
	logger: L,
}

pub struct SerializedResponse {
	pub data: Vec<u8>,
	pub message_count: u32,
	pub announcement_count: u32,
	pub update_count: u32,
	pub update_count_full: u32,
	pub update_count_incremental: u32,
}
impl<L: Deref + Clone + Send + Sync + 'static> RapidSyncProcessor<L> where L::Target: Logger {
	pub fn new(logger: L) -> Self {
		let network = config::network();
		let network_graph = if let Ok(file) = File::open(&config::network_graph_cache_path()) {
			log_info!(logger, "Initializing from cached network graph…");
			let mut buffered_reader = BufReader::new(file);
			let network_graph_result = NetworkGraph::read(&mut buffered_reader, logger.clone());
			if let Ok(network_graph) = network_graph_result {
				log_info!(logger, "Initialized from cached network graph!");
				network_graph
			} else {
				log_info!(logger, "Initialization from cached network graph failed: {}", network_graph_result.err().unwrap());
				NetworkGraph::new(network, logger.clone())
			}
		} else {
			NetworkGraph::new(network, logger.clone())
		};
		let arc_network_graph = Arc::new(network_graph);
		Self {
			network_graph: arc_network_graph,
			logger,
		}
	}
	pub async fn start_sync(&self) {
		log_info!(self.logger, "Starting Rapid Gossip Sync Server");
		log_info!(self.logger, "Snapshot interval: {} seconds", config::snapshot_generation_interval());

		// means to indicate sync completion status within this module
		let (sync_completion_sender, mut sync_completion_receiver) = mpsc::channel::<()>(1);

		if config::DOWNLOAD_NEW_GOSSIP {
			let (mut persister, persistence_sender) = GossipPersister::new(self.network_graph.clone(), self.logger.clone());

			log_info!(self.logger, "Starting gossip download");
			tokio::spawn(tracking::download_gossip(persistence_sender, sync_completion_sender,
				Arc::clone(&self.network_graph), self.logger.clone()));
			log_info!(self.logger, "Starting gossip db persistence listener");
			tokio::spawn(async move { persister.persist_gossip().await; });
		} else {
			sync_completion_sender.send(()).await.unwrap();
		}

		let sync_completion = sync_completion_receiver.recv().await;
		if sync_completion.is_none() {
			panic!("Sync failed!");
		}
		log_info!(self.logger, "Initial sync complete!");

		// start the gossip snapshotting service
		Snapshotter::new(Arc::clone(&self.network_graph), self.logger.clone()).snapshot_gossip().await;
	}
}
pub(crate) async fn connect_to_db() -> Client {
	let connection_config = config::db_connection_config();
	let (client, connection) = connection_config.connect(NoTls).await.unwrap();

	tokio::spawn(async move {
		if let Err(e) = connection.await {
			panic!("connection error: {}", e);
		}
	});
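	// In tests, each run gets its own Postgres schema so concurrently running tests
	// cannot interfere with one another's tables.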
	#[cfg(test)]
	{
		let schema_name = tests::db_test_schema();
		let schema_creation_command = format!("CREATE SCHEMA IF NOT EXISTS {}", schema_name);
		client.execute(&schema_creation_command, &[]).await.unwrap();
		client.execute(&format!("SET search_path TO {}", schema_name), &[]).await.unwrap();
	}

	client.execute("set time zone UTC", &[]).await.unwrap();
	client
}
/// This method generates a no-op blob that can be used as a delta where none exists.
///
/// The primary purpose of this method is the scenario of a client retrieving and processing a
/// given snapshot, and then immediately retrieving the would-be next snapshot at the timestamp
/// indicated by the one that was just processed.
/// Previously, there would not be a new snapshot to be processed for that particular timestamp yet,
/// and the server would return a 404 error.
///
/// In principle, this method could also be used to address another unfortunately all too common
/// pitfall: requesting snapshots from intermediate timestamps, i.e. those that are not multiples
/// of our granularity constant. Note that using it for that purpose can be dangerous, because if
/// consumed, the `timestamp` value calculated here will overwrite the timestamp that the client
/// previously had, which could result in duplicated or omitted gossip down the line.
fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
	let mut blob = GOSSIP_PREFIX.to_vec();

	let network = config::network();
	let chain_hash = ChainHash::using_genesis_block(network);
	chain_hash.write(&mut blob).unwrap();

	let blob_timestamp = Snapshotter::<Arc<RGSSLogger>>::round_down_to_nearest_multiple(current_timestamp, SYMLINK_GRANULARITY_INTERVAL as u64) as u32;
	blob_timestamp.write(&mut blob).unwrap();

	0u32.write(&mut blob).unwrap(); // node count
	0u32.write(&mut blob).unwrap(); // announcement count
	0u32.write(&mut blob).unwrap(); // update count

	blob
}
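// For reference, the resulting no-op blob has a fixed 52-byte layout:
//   [4-byte prefix][32-byte chain hash][u32 timestamp][u32 node count = 0]
//   [u32 announcement count = 0][u32 update count = 0]
// A minimal sketch of a layout check (hypothetical test; assumes `config::network()`
// needs no external setup):
//
//   #[test]
//   fn empty_blob_has_fixed_layout() {
//       let blob = serialize_empty_blob(1_700_003_000);
//       assert_eq!(&blob[..4], &GOSSIP_PREFIX);
//       assert_eq!(blob.len(), 4 + 32 + 4 + 4 + 4 + 4);
//   }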
async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L) -> SerializedResponse where L::Target: Logger {
	let client = connect_to_db().await;

	network_graph.remove_stale_channels_and_tracking();

	let mut output: Vec<u8> = vec![];
	let snapshot_interval = config::snapshot_generation_interval();

	// Note: the chain hash is strictly necessary only when channel announcements or
	// non-incremental updates are present; for announcement-free, incremental-only
	// deltas it could be skipped. This implementation always writes it (see below).

	let mut node_id_set: HashSet<NodeId> = HashSet::new();
	let mut node_id_indices: HashMap<NodeId, usize> = HashMap::new();
	let mut node_ids: Vec<NodeId> = Vec::new();
	let mut duplicate_node_ids: i32 = 0;
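	// Deduplicate node IDs: the first occurrence of a node ID appends it to `node_ids` and
	// records its position; repeat occurrences reuse that index, so announcements can
	// reference nodes by compact index rather than by full serialized public key.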
	let mut get_node_id_index = |node_id: NodeId| {
		if node_id_set.insert(node_id) {
			node_ids.push(node_id);
			let index = node_ids.len() - 1;
			node_id_indices.insert(node_id, index);
			return index;
		}
		duplicate_node_ids += 1;
		node_id_indices[&node_id]
	};
	let mut delta_set = DeltaSet::new();
	lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, logger.clone()).await;
	log_info!(logger, "announcement channel count: {}", delta_set.len());
	lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
	log_info!(logger, "update-fetched channel count: {}", delta_set.len());
	lookup::filter_delta_set(&mut delta_set, logger.clone());
	log_info!(logger, "update-filtered channel count: {}", delta_set.len());
	let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);
	// process announcements
	// write the number of channel announcements to the output
	let announcement_count = serialization_details.announcements.len() as u32;
	announcement_count.write(&mut output).unwrap();
	let mut previous_announcement_scid = 0;
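	// Announcements are ordered by short channel ID, so each SCID is serialized as a delta
	// against the previous one, keeping the encoding compact.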
	for current_announcement in serialization_details.announcements {
		let id_index_1 = get_node_id_index(current_announcement.node_id_1);
		let id_index_2 = get_node_id_index(current_announcement.node_id_2);
		let mut stripped_announcement = serialization::serialize_stripped_channel_announcement(&current_announcement, id_index_1, id_index_2, previous_announcement_scid);
		output.append(&mut stripped_announcement);

		previous_announcement_scid = current_announcement.short_channel_id;
	}
	let mut previous_update_scid = 0;
	let update_count = serialization_details.updates.len() as u32;
	update_count.write(&mut output).unwrap();

	let default_update_values = serialization_details.full_update_defaults;
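	// The most common field values across full updates are written once as defaults, so
	// individual updates only need to encode fields that deviate from them.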
	if update_count > 0 {
		default_update_values.cltv_expiry_delta.write(&mut output).unwrap();
		default_update_values.htlc_minimum_msat.write(&mut output).unwrap();
		default_update_values.fee_base_msat.write(&mut output).unwrap();
		default_update_values.fee_proportional_millionths.write(&mut output).unwrap();
		default_update_values.htlc_maximum_msat.write(&mut output).unwrap();
	}
	let mut update_count_full = 0;
	let mut update_count_incremental = 0;
	for current_update in serialization_details.updates {
		match &current_update {
			UpdateSerialization::Full(_) => {
				update_count_full += 1;
			},
			UpdateSerialization::Incremental(_, _) | UpdateSerialization::Reminder(_, _) => {
				update_count_incremental += 1;
			},
		};

		let mut stripped_update = serialization::serialize_stripped_channel_update(&current_update, &default_update_values, previous_update_scid);
		output.append(&mut stripped_update);

		previous_update_scid = current_update.scid();
	}
	let message_count = announcement_count + update_count;

	let mut prefixed_output = GOSSIP_PREFIX.to_vec();

	// always write the chain hash
	serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
	// always write the latest seen timestamp
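	// The latest-seen timestamp is rounded down to a multiple of the snapshot interval so
	// the client's next request aligns with a generated snapshot. For example, assuming an
	// 86,400-second interval, a latest_seen of 1,700,003,000 serializes as 1,699,920,000.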
	let latest_seen_timestamp = serialization_details.latest_seen;
	let overflow_seconds = latest_seen_timestamp % snapshot_interval;
	let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
	serialized_seen_timestamp.write(&mut prefixed_output).unwrap();

	let node_id_count = node_ids.len() as u32;
	node_id_count.write(&mut prefixed_output).unwrap();

	for current_node_id in node_ids {
		current_node_id.write(&mut prefixed_output).unwrap();
	}

	prefixed_output.append(&mut output);
	log_info!(logger, "duplicated node ids: {}", duplicate_node_ids);
	log_info!(logger, "latest seen timestamp: {:?}", serialization_details.latest_seen);

	SerializedResponse {
		data: prefixed_output,
		message_count,
		announcement_count,
		update_count,
		update_count_full,
		update_count_incremental,
	}
}