#![deny(broken_intra_doc_links)]
#![deny(private_intra_doc_links)]
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_variables)]
#![deny(unused_imports)]

use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::BufReader;
use std::ops::Deref;
use std::sync::Arc;
use lightning::log_info;

use lightning::routing::gossip::{NetworkGraph, NodeId};
use lightning::util::logger::Logger;
use lightning::util::ser::{ReadableArgs, Writeable};
use tokio::sync::mpsc;
use crate::lookup::DeltaSet;

use crate::persistence::GossipPersister;
use crate::serialization::UpdateSerialization;
use crate::snapshot::Snapshotter;
use crate::types::RGSSLogger;

/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
/// sync formats arise in the future.
///
/// The fourth byte is the protocol version in case our format gets updated.
const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
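
// The three leading bytes above are the ASCII characters "LDK" (0x4c, 0x44, 0x4b), followed by
// the protocol version byte 1. Purely as an illustration (this helper is hypothetical and not
// used anywhere in this crate), a consumer could sanity-check a downloaded blob against the
// prefix before attempting to parse the rest of it:
#[allow(dead_code)]
fn blob_has_gossip_prefix(blob: &[u8]) -> bool {
	// compares the first four bytes of the blob against [76, 68, 75, 1]
	blob.starts_with(&GOSSIP_PREFIX)
}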

pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
	network_graph: Arc<NetworkGraph<L>>,
	logger: L,
}

pub struct SerializedResponse {
	pub data: Vec<u8>,
	pub message_count: u32,
	pub announcement_count: u32,
	pub update_count: u32,
	pub update_count_full: u32,
	pub update_count_incremental: u32,
}

impl<L: Deref + Clone + Send + Sync + 'static> RapidSyncProcessor<L> where L::Target: Logger {
	pub fn new(logger: L) -> Self {
		let network = config::network();
		let network_graph = if let Ok(file) = File::open(&config::network_graph_cache_path()) {
			log_info!(logger, "Initializing from cached network graph…");
			let mut buffered_reader = BufReader::new(file);
			let network_graph_result = NetworkGraph::read(&mut buffered_reader, logger.clone());
			if let Ok(network_graph) = network_graph_result {
				log_info!(logger, "Initialized from cached network graph!");
				network_graph
			} else {
				log_info!(logger, "Initialization from cached network graph failed: {}", network_graph_result.err().unwrap());
				NetworkGraph::new(network, logger.clone())
			}
		} else {
			NetworkGraph::new(network, logger.clone())
		};

		let arc_network_graph = Arc::new(network_graph);
		Self {
			network_graph: arc_network_graph,
			logger,
		}
	}

	pub async fn start_sync(&self) {
		// means to indicate sync completion status within this module
		let (sync_completion_sender, mut sync_completion_receiver) = mpsc::channel::<()>(1);

		if config::DOWNLOAD_NEW_GOSSIP {
			let (mut persister, persistence_sender) = GossipPersister::new(self.network_graph.clone(), self.logger.clone());

			log_info!(self.logger, "Starting gossip download");
			tokio::spawn(tracking::download_gossip(persistence_sender, sync_completion_sender,
				Arc::clone(&self.network_graph), self.logger.clone()));

			log_info!(self.logger, "Starting gossip db persistence listener");
			tokio::spawn(async move { persister.persist_gossip().await; });
		} else {
			sync_completion_sender.send(()).await.unwrap();
		}

		let sync_completion = sync_completion_receiver.recv().await;
		if sync_completion.is_none() {
			panic!("Sync failed!");
		}
		log_info!(self.logger, "Initial sync complete!");

		// start the gossip snapshotting service
		Snapshotter::new(Arc::clone(&self.network_graph), self.logger.clone()).snapshot_gossip().await;
	}
}

/// This method generates a no-op blob that can be used as a delta where none exists.
///
/// The primary purpose of this method is the scenario of a client retrieving and processing a
/// given snapshot, and then immediately retrieving the would-be next snapshot at the timestamp
/// indicated by the one that was just processed. Previously, there would not yet be a new
/// snapshot for that particular timestamp, and the server would return a 404 error.
///
/// In principle, this method could also be used to address another unfortunately all too common
/// pitfall: requesting snapshots at intermediate timestamps, i.e. ones that are not multiples
/// of our granularity constant. Note that for that purpose, this method could be very dangerous,
/// because if consumed, the `timestamp` value calculated here will overwrite the timestamp that
/// the client previously had, which could result in duplicated or omitted gossip down the line.
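///
/// As a rough, illustrative example (the interval value is assumed here for the sake of the
/// example rather than read from `config`): with a snapshot interval of 86,400 seconds, a
/// `current_timestamp` of 1,700,000,123 would be written into the blob as a rounded-down
/// timestamp of 1,699,920,000.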
fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
	let mut blob = GOSSIP_PREFIX.to_vec();

	let network = config::network();
	let genesis_block = bitcoin::blockdata::constants::genesis_block(network);
	let chain_hash = genesis_block.block_hash();
	chain_hash.write(&mut blob).unwrap();

	let blob_timestamp = Snapshotter::<Arc<RGSSLogger>>::round_down_to_nearest_multiple(current_timestamp, config::SNAPSHOT_CALCULATION_INTERVAL as u64) as u32;
	blob_timestamp.write(&mut blob).unwrap();

	0u32.write(&mut blob).unwrap(); // node count
	0u32.write(&mut blob).unwrap(); // announcement count
	0u32.write(&mut blob).unwrap(); // update count

	blob
}

async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, logger: L) -> SerializedResponse where L::Target: Logger {
	let (client, connection) = lookup::connect_to_db().await;

	network_graph.remove_stale_channels_and_tracking();

	tokio::spawn(async move {
		if let Err(e) = connection.await {
			panic!("connection error: {}", e);
		}
	});

	let mut output: Vec<u8> = vec![];

	// the chain hash is only strictly necessary if channel announcements or non-incremental
	// updates are present; for announcement-free, incremental-only deltas it could in principle
	// be skipped, though the code below currently always prepends it

	let mut node_id_set: HashSet<NodeId> = HashSet::new();
	let mut node_id_indices: HashMap<NodeId, usize> = HashMap::new();
	let mut node_ids: Vec<NodeId> = Vec::new();
	let mut duplicate_node_ids: i32 = 0;

	let mut get_node_id_index = |node_id: NodeId| {
		if node_id_set.insert(node_id) {
			node_ids.push(node_id);
			let index = node_ids.len() - 1;
			node_id_indices.insert(node_id, index);
			return index;
		}
		duplicate_node_ids += 1;
		node_id_indices[&node_id]
	};
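
	// For example: the first distinct node ID encountered is assigned index 0, the next new one
	// index 1, and so on; a repeated node ID returns its previously assigned index while bumping
	// `duplicate_node_ids` for the stats logged at the end of this function.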

	let mut delta_set = DeltaSet::new();
	lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, logger.clone()).await;
	log_info!(logger, "announcement channel count: {}", delta_set.len());
	lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
	log_info!(logger, "update-fetched channel count: {}", delta_set.len());
	lookup::filter_delta_set(&mut delta_set, logger.clone());
	log_info!(logger, "update-filtered channel count: {}", delta_set.len());
	let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);

	// process announcements
	// write the number of channel announcements to the output
	let announcement_count = serialization_details.announcements.len() as u32;
	announcement_count.write(&mut output).unwrap();
	let mut previous_announcement_scid = 0;
	for current_announcement in serialization_details.announcements {
		let id_index_1 = get_node_id_index(current_announcement.node_id_1);
		let id_index_2 = get_node_id_index(current_announcement.node_id_2);
		let mut stripped_announcement = serialization::serialize_stripped_channel_announcement(&current_announcement, id_index_1, id_index_2, previous_announcement_scid);
		output.append(&mut stripped_announcement);

		previous_announcement_scid = current_announcement.short_channel_id;
	}

	let mut previous_update_scid = 0;
	let update_count = serialization_details.updates.len() as u32;
	update_count.write(&mut output).unwrap();

	let default_update_values = serialization_details.full_update_defaults;
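	// Roughly speaking, these defaults are written to the output exactly once; a full update that
	// follows can then omit fields whose values match them, and an incremental update only carries
	// the fields that changed, which is a large part of what keeps these snapshots compact.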
	if update_count > 0 {
		default_update_values.cltv_expiry_delta.write(&mut output).unwrap();
		default_update_values.htlc_minimum_msat.write(&mut output).unwrap();
		default_update_values.fee_base_msat.write(&mut output).unwrap();
		default_update_values.fee_proportional_millionths.write(&mut output).unwrap();
		default_update_values.htlc_maximum_msat.write(&mut output).unwrap();
	}

	let mut update_count_full = 0;
	let mut update_count_incremental = 0;
	for current_update in serialization_details.updates {
		match &current_update {
			UpdateSerialization::Full(_) => {
				update_count_full += 1;
			},
			UpdateSerialization::Incremental(_, _) | UpdateSerialization::Reminder(_, _) => {
				update_count_incremental += 1;
			},
		};

		let mut stripped_update = serialization::serialize_stripped_channel_update(&current_update, &default_update_values, previous_update_scid);
		output.append(&mut stripped_update);

		previous_update_scid = current_update.scid();
	}

	let message_count = announcement_count + update_count;

	let mut prefixed_output = GOSSIP_PREFIX.to_vec();

	// always write the chain hash
	serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
	// always write the latest seen timestamp
	let latest_seen_timestamp = serialization_details.latest_seen;
	let overflow_seconds = latest_seen_timestamp % config::SNAPSHOT_CALCULATION_INTERVAL;
	let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
	serialized_seen_timestamp.write(&mut prefixed_output).unwrap();

	let node_id_count = node_ids.len() as u32;
	node_id_count.write(&mut prefixed_output).unwrap();

	for current_node_id in node_ids {
		current_node_id.write(&mut prefixed_output).unwrap();
	}

	prefixed_output.append(&mut output);

	log_info!(logger, "duplicated node ids: {}", duplicate_node_ids);
	log_info!(logger, "latest seen timestamp: {:?}", serialization_details.latest_seen);

	SerializedResponse {
		data: prefixed_output,
		message_count,
		announcement_count,
		update_count,
		update_count_full,
		update_count_incremental,
	}
}