47f62f6228793445d1ebea318eec04e0d760a147
[rapid-gossip-sync-server] / src / lib.rs
#![deny(unsafe_code)]
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_variables)]
#![deny(unused_imports)]

extern crate core;

use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::BufReader;
use std::ops::Deref;
use std::sync::Arc;
use lightning::log_info;

use lightning::routing::gossip::{NetworkGraph, NodeId};
use lightning::util::logger::Logger;
use lightning::util::ser::{ReadableArgs, Writeable};
use tokio::sync::mpsc;
use tokio_postgres::{Client, NoTls};
use crate::lookup::DeltaSet;

use crate::persistence::GossipPersister;
use crate::serialization::UpdateSerialization;
use crate::snapshot::Snapshotter;
use crate::types::RGSSLogger;

mod downloader;
mod tracking;
mod lookup;
mod persistence;
mod serialization;
mod snapshot;
mod config;
mod hex_utils;
mod verifier;

pub mod types;
/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
/// sync formats arise in the future. The first three bytes, `76, 68, 75`, spell "LDK" in ASCII.
///
/// The fourth byte is the protocol version in case our format gets updated.
const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];

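/// Processor which loads or creates the network graph, performs the initial gossip sync, and then
/// hands off to the snapshotter.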
pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
	network_graph: Arc<NetworkGraph<L>>,
	logger: L
}

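/// A serialized delta blob, along with counts of the messages it contains.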
pub struct SerializedResponse {
	pub data: Vec<u8>,
	pub message_count: u32,
	pub announcement_count: u32,
	pub update_count: u32,
	pub update_count_full: u32,
	pub update_count_incremental: u32,
}

impl<L: Deref + Clone + Send + Sync + 'static> RapidSyncProcessor<L> where L::Target: Logger {
	pub fn new(logger: L) -> Self {
		let network = config::network();
		let network_graph = if let Ok(file) = File::open(&config::network_graph_cache_path()) {
			log_info!(logger, "Initializing from cached network graph…");
			let mut buffered_reader = BufReader::new(file);
			let network_graph_result = NetworkGraph::read(&mut buffered_reader, logger.clone());
			if let Ok(network_graph) = network_graph_result {
				log_info!(logger, "Initialized from cached network graph!");
				network_graph
			} else {
				log_info!(logger, "Initialization from cached network graph failed: {}", network_graph_result.err().unwrap());
				NetworkGraph::new(network, logger.clone())
			}
		} else {
			NetworkGraph::new(network, logger.clone())
		};
		let arc_network_graph = Arc::new(network_graph);
		Self {
			network_graph: arc_network_graph,
			logger
		}
	}

	pub async fn start_sync(&self) {
		// channel used to signal sync completion within this module
		let (sync_completion_sender, mut sync_completion_receiver) = mpsc::channel::<()>(1);

		if config::DOWNLOAD_NEW_GOSSIP {
			let (mut persister, persistence_sender) = GossipPersister::new(self.network_graph.clone(), self.logger.clone());

			log_info!(self.logger, "Starting gossip download");
			tokio::spawn(tracking::download_gossip(persistence_sender, sync_completion_sender,
				Arc::clone(&self.network_graph), self.logger.clone()));
			log_info!(self.logger, "Starting gossip db persistence listener");
			tokio::spawn(async move { persister.persist_gossip().await; });
		} else {
			sync_completion_sender.send(()).await.unwrap();
		}

		let sync_completion = sync_completion_receiver.recv().await;
		if sync_completion.is_none() {
			panic!("Sync failed!");
		}
		log_info!(self.logger, "Initial sync complete!");

		// start the gossip snapshotting service
		Snapshotter::new(Arc::clone(&self.network_graph), self.logger.clone()).snapshot_gossip().await;
	}
}
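
// A minimal sketch (hypothetical, not part of the original file) of how a binary might drive the
// processor; it assumes `RGSSLogger` exposes a `new` constructor in `types`.
//
// #[tokio::main]
// async fn main() {
// 	let logger = Arc::new(RGSSLogger::new());
// 	let processor = RapidSyncProcessor::new(logger);
// 	processor.start_sync().await;
// }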

pub(crate) async fn connect_to_db() -> Client {
	let connection_config = config::db_connection_config();
	let (client, connection) = connection_config.connect(NoTls).await.unwrap();

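	// tokio_postgres returns the connection object separately from the client; it must be awaited
	// on its own task to drive the underlying socket, or no queries will make progress.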
	tokio::spawn(async move {
		if let Err(e) = connection.await {
			panic!("connection error: {}", e);
		}
	});

	client.execute("set time zone UTC", &[]).await.unwrap();
	client
}

/// This method generates a no-op blob that can be used as a delta where none exists.
///
/// The primary purpose of this method is the scenario of a client retrieving and processing a
/// given snapshot, and then immediately retrieving the would-be next snapshot at the timestamp
/// indicated by the one that was just processed.
/// Previously, there would not yet be a new snapshot for that particular timestamp, and the
/// server would return a 404 error.
///
/// In principle, this method could also be used to address another unfortunately all too common
/// pitfall: requesting snapshots at intermediate timestamps, i.e. ones that are not multiples of
/// our granularity constant. Note that using it for that purpose could be very dangerous, because
/// if consumed, the `timestamp` value calculated here will overwrite the timestamp that the
/// client previously had, which could result in duplicated or omitted gossip down the line.
fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
	let mut blob = GOSSIP_PREFIX.to_vec();

	let network = config::network();
	let calc_interval = config::calculate_interval();
	let genesis_block = bitcoin::blockdata::constants::genesis_block(network);
	let chain_hash = genesis_block.block_hash();
	chain_hash.write(&mut blob).unwrap();

	let blob_timestamp = Snapshotter::<Arc<RGSSLogger>>::round_down_to_nearest_multiple(current_timestamp, calc_interval as u64) as u32;
	blob_timestamp.write(&mut blob).unwrap();

	0u32.write(&mut blob).unwrap(); // node count
	0u32.write(&mut blob).unwrap(); // announcement count
	0u32.write(&mut blob).unwrap(); // update count

	blob
}
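
// A layout sketch (added commentary, not part of the original file): the no-op blob consists of
// the 4-byte prefix, the 32-byte chain hash, a 4-byte timestamp, and three zeroed u32 counts,
// i.e. 52 bytes in total. This test assumes `config::network()` resolves without further setup.
#[cfg(test)]
mod empty_blob_tests {
	use super::*;

	#[test]
	fn empty_blob_has_expected_layout() {
		let blob = serialize_empty_blob(1_700_000_000);
		assert_eq!(blob[0..4], GOSSIP_PREFIX);
		assert_eq!(blob.len(), 4 + 32 + 4 + 4 + 4 + 4);
	}
}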

async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L) -> SerializedResponse where L::Target: Logger {
	let client = connect_to_db().await;

	network_graph.remove_stale_channels_and_tracking();

	let mut output: Vec<u8> = vec![];
	let calc_interval = config::calculate_interval();

	// Note: the chain hash is currently always prepended (see below). In principle it is only
	// necessary when channel announcements or non-incremental updates are present, so for
	// announcement-free, incremental-only deltas it could be skipped.

	let mut node_id_set: HashSet<NodeId> = HashSet::new();
	let mut node_id_indices: HashMap<NodeId, usize> = HashMap::new();
	let mut node_ids: Vec<NodeId> = Vec::new();
	let mut duplicate_node_ids: i32 = 0;

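	// Deduplicate node IDs: the first time an ID is seen, it is appended to `node_ids` and its
	// index recorded, so announcements can refer to nodes by compact index rather than by full
	// public key; repeat occurrences reuse the previously assigned index.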
	let mut get_node_id_index = |node_id: NodeId| {
		if node_id_set.insert(node_id) {
			node_ids.push(node_id);
			let index = node_ids.len() - 1;
			node_id_indices.insert(node_id, index);
			return index;
		}
		duplicate_node_ids += 1;
		node_id_indices[&node_id]
	};

	let mut delta_set = DeltaSet::new();
	lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, logger.clone()).await;
	log_info!(logger, "announcement channel count: {}", delta_set.len());
	lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
	log_info!(logger, "update-fetched channel count: {}", delta_set.len());
	lookup::filter_delta_set(&mut delta_set, logger.clone());
	log_info!(logger, "update-filtered channel count: {}", delta_set.len());
	let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);

	// process announcements
	// write the number of channel announcements to the output
	let announcement_count = serialization_details.announcements.len() as u32;
	announcement_count.write(&mut output).unwrap();
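	// Announcement SCIDs are delta-encoded: each serialized announcement only stores the offset
	// from the previously written short channel ID, keeping the ascending SCID sequence compact
	// on the wire.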
	let mut previous_announcement_scid = 0;
	for current_announcement in serialization_details.announcements {
		let id_index_1 = get_node_id_index(current_announcement.node_id_1);
		let id_index_2 = get_node_id_index(current_announcement.node_id_2);
		let mut stripped_announcement = serialization::serialize_stripped_channel_announcement(&current_announcement, id_index_1, id_index_2, previous_announcement_scid);
		output.append(&mut stripped_announcement);

		previous_announcement_scid = current_announcement.short_channel_id;
	}

	// process updates
	let mut previous_update_scid = 0;
	let update_count = serialization_details.updates.len() as u32;
	update_count.write(&mut output).unwrap();

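	// Commonly recurring channel update values are written once as defaults; full updates are
	// then serialized against these defaults, so frequently repeated values need not be encoded
	// per channel.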
	let default_update_values = serialization_details.full_update_defaults;
	if update_count > 0 {
		default_update_values.cltv_expiry_delta.write(&mut output).unwrap();
		default_update_values.htlc_minimum_msat.write(&mut output).unwrap();
		default_update_values.fee_base_msat.write(&mut output).unwrap();
		default_update_values.fee_proportional_millionths.write(&mut output).unwrap();
		default_update_values.htlc_maximum_msat.write(&mut output).unwrap();
	}

	let mut update_count_full = 0;
	let mut update_count_incremental = 0;
	for current_update in serialization_details.updates {
		match &current_update {
			UpdateSerialization::Full(_) => {
				update_count_full += 1;
			}
			UpdateSerialization::Incremental(_, _) | UpdateSerialization::Reminder(_, _) => {
				update_count_incremental += 1;
			}
		};

		let mut stripped_update = serialization::serialize_stripped_channel_update(&current_update, &default_update_values, previous_update_scid);
		output.append(&mut stripped_update);

		previous_update_scid = current_update.scid();
	}

	// some stats
	let message_count = announcement_count + update_count;

	let mut prefixed_output = GOSSIP_PREFIX.to_vec();

	// always write the chain hash
	serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
	// always write the latest seen timestamp, rounded down to the nearest multiple of the
	// snapshot interval so that it aligns with the snapshot cadence clients request at
	let latest_seen_timestamp = serialization_details.latest_seen;
	let overflow_seconds = latest_seen_timestamp % calc_interval;
	let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
	serialized_seen_timestamp.write(&mut prefixed_output).unwrap();

	let node_id_count = node_ids.len() as u32;
	node_id_count.write(&mut prefixed_output).unwrap();

	for current_node_id in node_ids {
		current_node_id.write(&mut prefixed_output).unwrap();
	}

	prefixed_output.append(&mut output);

	log_info!(logger, "duplicated node ids: {}", duplicate_node_ids);
	log_info!(logger, "latest seen timestamp: {:?}", serialization_details.latest_seen);

	SerializedResponse {
		data: prefixed_output,
		message_count,
		announcement_count,
		update_count,
		update_count_full,
		update_count_incremental,
	}
}