diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs
index 13dc6626..486b7157 100644
--- a/lightning/src/routing/network_graph.rs
+++ b/lightning/src/routing/network_graph.rs
@@ -28,22 +28,28 @@ use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalFie
 use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use ln::msgs;
 use util::ser::{Writeable, Readable, Writer};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
-
-use std::{cmp, fmt};
-use std::sync::{RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Mutex;
-use std::collections::BTreeMap;
-use std::collections::btree_map::Entry as BtreeEntry;
-use std::ops::Deref;
+use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
+
+use io;
+use prelude::*;
+use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
+use core::{cmp, fmt};
+use sync::{RwLock, RwLockReadGuard};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use sync::Mutex;
+use core::ops::Deref;
 
 use bitcoin::hashes::hex::ToHex;
 
 /// The maximum number of extra bytes which we do not understand in a gossip message before we will
 /// refuse to relay the message.
 const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
 
+/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
+/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations.
+const MAX_SCIDS_PER_REPLY: usize = 8000;
+
 /// Represents the network as nodes and channels between them
 #[derive(Clone, PartialEq)]
 pub struct NetworkGraph {
@@ -147,7 +153,7 @@ macro_rules! secp_verify_sig {
 	};
 }
 
-impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
+impl<C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
 	fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
 		self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?;
 		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
@@ -164,12 +170,16 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
 	fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) {
 		match update {
 			&msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => {
+				let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
+				log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" });
 				let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx);
 			},
 			&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => {
+				log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id);
 				self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent);
 			},
 			&msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => {
+				log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id);
 				self.network_graph.write().unwrap().fail_node(node_id, is_permanent);
 			},
 		}
@@ -312,12 +322,109 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
 		Ok(())
 	}
 
-	fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> {
-		// TODO
-		Err(LightningError {
-			err: String::from("Not implemented"),
-			action: ErrorAction::IgnoreError,
-		})
+	/// Processes a query from a peer by finding announced/public channels whose funding UTXOs
+	/// are in the specified block range. Due to message size limits, large range
+	/// queries may result in several reply messages. This implementation enqueues
+	/// all reply messages into pending events. Each message will allocate just under 65KiB. A full
+	/// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
+	/// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
+	/// memory constrained systems.
+	fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> {
+		log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks);
+
+		let network_graph = self.network_graph.read().unwrap();
+
+		let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
+
+		// We might receive valid queries with end_blocknum that would overflow SCID conversion.
+		// If so, we manually cap the ending block to avoid this overflow.
+		let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
+
+		// Per spec, we must reply to a query. Send an empty message when things are invalid.
+		if msg.chain_hash != network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 {
+			let mut pending_events = self.pending_events.lock().unwrap();
+			pending_events.push(MessageSendEvent::SendReplyChannelRange {
+				node_id: their_node_id.clone(),
+				msg: ReplyChannelRange {
+					chain_hash: msg.chain_hash.clone(),
+					first_blocknum: msg.first_blocknum,
+					number_of_blocks: msg.number_of_blocks,
+					sync_complete: true,
+					short_channel_ids: vec![],
+				}
+			});
+			return Err(LightningError {
+				err: String::from("query_channel_range could not be processed"),
+				action: ErrorAction::IgnoreError,
+			});
+		}
+
+		// Creates channel batches. We are not checking if the channel is routable
+		// (has at least one update). A peer may still want to know the channel
+		// exists even if it's not yet routable.
+		let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
+		for (_, ref chan) in network_graph.get_channels().range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
+			if let Some(chan_announcement) = &chan.announcement_message {
+				// Construct a new batch if last one is full
+				if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
+					batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
+				}
+
+				let batch = batches.last_mut().unwrap();
+				batch.push(chan_announcement.contents.short_channel_id);
+			}
+		}
+		drop(network_graph);
+
+		let mut pending_events = self.pending_events.lock().unwrap();
+		let batch_count = batches.len();
+		let mut prev_batch_endblock = msg.first_blocknum;
+		for (batch_index, batch) in batches.into_iter().enumerate() {
+			// Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
+			// and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
+			//
+			// Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
+			// reply is >= the previous reply's `first_blocknum` and either exactly the previous
+			// reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
+			// significant diversion from the requirements set by the spec, and, in case of blocks
+			// with no channel opens (e.g. empty blocks), requires that we use the previous value
+			// and *not* derive the first_blocknum from the actual first block of the reply.
+			let first_blocknum = prev_batch_endblock;
+
+			// Each message carries the number of blocks (from the `first_blocknum`) its contents
+			// fit in. Though there is no requirement that we use exactly the number of blocks its
+			// contents are from, except for the bogus requirements c-lightning enforces, above.
+			//
+			// Per spec, the last end block (i.e. `first_blocknum + number_of_blocks`) needs to be
+			// >= the query's end block. Thus, for the last reply, we calculate the difference
+			// between the query's end block and the start of the reply.
+			//
+			// Overflow safe since end_blocknum = msg.first_blocknum + msg.number_of_blocks and
+			// first_blocknum will be either msg.first_blocknum or a higher block height.
+			let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+				(true, msg.end_blocknum() - first_blocknum)
+			}
+			// Prior replies should use the number of blocks that fit into the reply. Overflow
+			// safe since first_blocknum is always <= last SCID's block.
+			else {
+				(false, block_from_scid(batch.last().unwrap()) - first_blocknum)
+			};
+
+			prev_batch_endblock = first_blocknum + number_of_blocks;
+
+			pending_events.push(MessageSendEvent::SendReplyChannelRange {
+				node_id: their_node_id.clone(),
+				msg: ReplyChannelRange {
+					chain_hash: msg.chain_hash.clone(),
+					first_blocknum,
+					number_of_blocks,
+					sync_complete,
+					short_channel_ids: batch,
+				}
+			});
+		}
+
+		Ok(())
 	}
 
 	fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> {
@@ -337,7 +444,7 @@ where
 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
-		std::mem::swap(&mut ret, &mut pending_events);
+		core::mem::swap(&mut ret, &mut pending_events);
 		ret
 	}
 }
@@ -373,14 +480,14 @@ impl fmt::Display for DirectionalChannelInfo {
 	}
 }
 
-impl_writeable!(DirectionalChannelInfo, 0, {
-	last_update,
-	enabled,
-	cltv_expiry_delta,
-	htlc_minimum_msat,
-	htlc_maximum_msat,
-	fees,
-	last_update_message
+impl_writeable_tlv_based!(DirectionalChannelInfo, {
+	(0, last_update, required),
+	(2, enabled, required),
+	(4, cltv_expiry_delta, required),
+	(6, htlc_minimum_msat, required),
+	(8, htlc_maximum_msat, required),
+	(10, fees, required),
+	(12, last_update_message, required),
 });
 
 #[derive(Clone, Debug, PartialEq)]
@@ -414,14 +521,14 @@ impl fmt::Display for ChannelInfo {
 	}
 }
 
-impl_writeable!(ChannelInfo, 0, {
-	features,
-	node_one,
-	one_to_two,
-	node_two,
-	two_to_one,
-	capacity_sats,
-	announcement_message
+impl_writeable_tlv_based!(ChannelInfo, {
+	(0, features, required),
+	(2, node_one, required),
+	(4, one_to_two, required),
+	(6, node_two, required),
+	(8, two_to_one, required),
+	(10, capacity_sats, required),
+	(12, announcement_message, required),
 });
 
 
@@ -435,24 +542,10 @@ pub struct RoutingFees {
 	pub proportional_millionths: u32,
 }
 
-impl Readable for RoutingFees{
-	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<RoutingFees, DecodeError> {
-		let base_msat: u32 = Readable::read(reader)?;
-		let proportional_millionths: u32 = Readable::read(reader)?;
-		Ok(RoutingFees {
-			base_msat,
-			proportional_millionths,
-		})
-	}
-}
-
-impl Writeable for RoutingFees {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-		self.base_msat.write(writer)?;
-		self.proportional_millionths.write(writer)?;
-		Ok(())
-	}
-}
+impl_writeable_tlv_based!(RoutingFees, {
+	(0, base_msat, required),
+	(2, proportional_millionths, required)
+});
 
 #[derive(Clone, Debug, PartialEq)]
 /// Information received in the latest node_announcement from this node.
@@ -477,48 +570,14 @@ pub struct NodeAnnouncementInfo {
 	pub announcement_message: Option<NodeAnnouncement>
 }
 
-impl Writeable for NodeAnnouncementInfo {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-		self.features.write(writer)?;
-		self.last_update.write(writer)?;
-		self.rgb.write(writer)?;
-		self.alias.write(writer)?;
-		(self.addresses.len() as u64).write(writer)?;
-		for ref addr in &self.addresses {
-			addr.write(writer)?;
-		}
-		self.announcement_message.write(writer)?;
-		Ok(())
-	}
-}
-
-impl Readable for NodeAnnouncementInfo {
-	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NodeAnnouncementInfo, DecodeError> {
-		let features = Readable::read(reader)?;
-		let last_update = Readable::read(reader)?;
-		let rgb = Readable::read(reader)?;
-		let alias = Readable::read(reader)?;
-		let addresses_count: u64 = Readable::read(reader)?;
-		let mut addresses = Vec::with_capacity(cmp::min(addresses_count, MAX_ALLOC_SIZE / 40) as usize);
-		for _ in 0..addresses_count {
-			match Readable::read(reader) {
-				Ok(Ok(addr)) => { addresses.push(addr); },
-				Ok(Err(_)) => return Err(DecodeError::InvalidValue),
-				Err(DecodeError::ShortRead) => return Err(DecodeError::BadLengthDescriptor),
-				_ => unreachable!(),
-			}
-		}
-		let announcement_message = Readable::read(reader)?;
-		Ok(NodeAnnouncementInfo {
-			features,
-			last_update,
-			rgb,
-			alias,
-			addresses,
-			announcement_message
-		})
-	}
-}
+impl_writeable_tlv_based!(NodeAnnouncementInfo, {
+	(0, features, required),
+	(2, last_update, required),
+	(4, rgb, required),
+	(6, alias, required),
+	(8, announcement_message, option),
+	(10, addresses, vec_type),
+});
 
 #[derive(Clone, Debug, PartialEq)]
 /// Details about a node in the network, known from the network announcement.
@@ -543,39 +602,19 @@ impl fmt::Display for NodeInfo {
 	}
 }
 
-impl Writeable for NodeInfo {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-		(self.channels.len() as u64).write(writer)?;
-		for ref chan in self.channels.iter() {
-			chan.write(writer)?;
-		}
-		self.lowest_inbound_channel_fees.write(writer)?;
-		self.announcement_info.write(writer)?;
-		Ok(())
-	}
-}
-
-const MAX_ALLOC_SIZE: u64 = 64*1024;
+impl_writeable_tlv_based!(NodeInfo, {
+	(0, lowest_inbound_channel_fees, option),
+	(2, announcement_info, option),
+	(4, channels, vec_type),
+});
 
-impl Readable for NodeInfo {
-	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NodeInfo, DecodeError> {
-		let channels_count: u64 = Readable::read(reader)?;
-		let mut channels = Vec::with_capacity(cmp::min(channels_count, MAX_ALLOC_SIZE / 8) as usize);
-		for _ in 0..channels_count {
-			channels.push(Readable::read(reader)?);
-		}
-		let lowest_inbound_channel_fees = Readable::read(reader)?;
-		let announcement_info = Readable::read(reader)?;
-		Ok(NodeInfo {
-			channels,
-			lowest_inbound_channel_fees,
-			announcement_info,
-		})
-	}
-}
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl Writeable for NetworkGraph {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+
 		self.genesis_hash.write(writer)?;
 		(self.channels.len() as u64).write(writer)?;
 		for (ref chan_id, ref chan_info) in self.channels.iter() {
@@ -587,12 +626,16 @@ impl Writeable for NetworkGraph {
 			node_id.write(writer)?;
 			node_info.write(writer)?;
 		}
+
+		write_tlv_fields!(writer, {});
 		Ok(())
 	}
 }
 
 impl Readable for NetworkGraph {
-	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
+	fn read<R: io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
+		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
+
 		let genesis_hash: BlockHash = Readable::read(reader)?;
 		let channels_count: u64 = Readable::read(reader)?;
 		let mut channels = BTreeMap::new();
@@ -608,6 +651,8 @@ impl Readable for NetworkGraph {
 			let node_info = Readable::read(reader)?;
 			nodes.insert(node_id, node_info);
 		}
+		read_tlv_fields!(reader, {});
+
 		Ok(NetworkGraph {
 			genesis_hash,
 			channels,
@@ -689,7 +734,7 @@ impl NetworkGraph {
 			Some(node) => {
 				if let Some(node_info) = node.announcement_info.as_ref() {
 					if node_info.last_update >= msg.timestamp {
-						return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+						return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
					}
				}
 
@@ -810,7 +855,7 @@ impl NetworkGraph {
 					Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id);
 					*entry.get_mut() = chan_info;
 				} else {
-					return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError})
+					return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)})
 				}
 			},
 			BtreeEntry::Vacant(entry) => {
@@ -912,7 +957,7 @@ impl NetworkGraph {
 		( $target: expr, $src_node: expr) => {
 			if let Some(existing_chan_info) = $target.as_ref() {
 				if existing_chan_info.last_update >= msg.timestamp {
-					return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+					return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
 				}
 				chan_was_enabled = existing_chan_info.enabled;
 			} else {
@@ -1028,6 +1073,7 @@ mod tests {
 	use util::logger::Logger;
 	use util::ser::{Readable, Writeable};
 	use util::events::{MessageSendEvent, MessageSendEventsProvider};
+	use util::scid_utils::scid_from_parts;
 
 	use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 	use bitcoin::hashes::Hash;
@@ -1042,7 +1088,9 @@ mod tests {
 	use bitcoin::secp256k1::key::{PublicKey, SecretKey};
 	use bitcoin::secp256k1::{All, Secp256k1};
 
-	use std::sync::Arc;
+	use io;
+	use prelude::*;
+	use sync::Arc;
 
 	fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>) {
 		let secp_ctx = Secp256k1::new();
@@ -1950,7 +1998,7 @@ mod tests {
 		assert!(!network.get_nodes().is_empty());
 		assert!(!network.get_channels().is_empty());
 		network.write(&mut w).unwrap();
-		assert!(<NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network);
+		assert!(<NetworkGraph>::read(&mut io::Cursor::new(&w.0)).unwrap() == *network);
 	}
 
 	#[test]
@@ -2089,17 +2137,312 @@ mod tests {
 	#[test]
 	fn handling_query_channel_range() {
 		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
-		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
-		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
 		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
+		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
+		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
+		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
+		let bitcoin_key_1 = PublicKey::from_secret_key(&secp_ctx, node_1_btckey);
+		let bitcoin_key_2 = PublicKey::from_secret_key(&secp_ctx, node_2_btckey);
 
-		let result = net_graph_msg_handler.handle_query_channel_range(&node_id, QueryChannelRange {
-			chain_hash,
-			first_blocknum: 0,
-			number_of_blocks: 0xffff_ffff,
-		});
-		assert!(result.is_err());
+		let mut scids: Vec<u64> = vec![
+			scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
+			scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
+		];
+
+		// used for testing multipart reply across blocks
+		for block in 100000..=108001 {
+			scids.push(scid_from_parts(block, 0, 0).unwrap());
+		}
+
+		// used for testing resumption on same block
+		scids.push(scid_from_parts(108001, 1, 0).unwrap());
+
+		for scid in scids {
+			let unsigned_announcement = UnsignedChannelAnnouncement {
+				features: ChannelFeatures::known(),
+				chain_hash: chain_hash.clone(),
+				short_channel_id: scid,
+				node_id_1,
+				node_id_2,
+				bitcoin_key_1,
+				bitcoin_key_2,
+				excess_data: Vec::new(),
+			};
+
+			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
+			let valid_announcement = ChannelAnnouncement {
+				node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
+				node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
+				bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
+				bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
+				contents: unsigned_announcement.clone(),
+			};
+			match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
+				Ok(_) => (),
+				_ => panic!()
+			};
+		}
+
+		// Error when number_of_blocks=0
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0,
+				number_of_blocks: 0,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0,
+				number_of_blocks: 0,
+				sync_complete: true,
+				short_channel_ids: vec![]
+			}]
+		);
+
+		// Error when wrong chain
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
+				first_blocknum: 0,
+				number_of_blocks: 0xffff_ffff,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
+				first_blocknum: 0,
+				number_of_blocks: 0xffff_ffff,
+				sync_complete: true,
+				short_channel_ids: vec![],
+			}]
+		);
+
+		// Error when first_blocknum > 0xffffff
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0x01000000,
+				number_of_blocks: 0xffff_ffff,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0x01000000,
+				number_of_blocks: 0xffff_ffff,
+				sync_complete: true,
+				short_channel_ids: vec![]
+			}]
+		);
+
+		// Empty reply when max valid SCID block num
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0xffffff,
+				number_of_blocks: 1,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0xffffff,
+					number_of_blocks: 1,
+					sync_complete: true,
+					short_channel_ids: vec![]
+				},
+			]
+		);
+
+		// No results in valid query range
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 1000,
+				number_of_blocks: 1000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 1000,
+					number_of_blocks: 1000,
+					sync_complete: true,
+					short_channel_ids: vec![],
+				}
+			]
+		);
+
+		// Overflow first_blocknum + number_of_blocks
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0xfe0000,
+				number_of_blocks: 0xffffffff,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0xfe0000,
+					number_of_blocks: 0xffffffff - 0xfe0000,
+					sync_complete: true,
+					short_channel_ids: vec![
+						0xfffffe_ffffff_ffff, // max
+					]
+				}
+			]
+		);
+
+		// Single block exactly full
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100000,
+				number_of_blocks: 8000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 100000,
+					number_of_blocks: 8000,
+					sync_complete: true,
+					short_channel_ids: (100000..=107999)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+			]
+		);
+
+		// Multiple split on new block
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100000,
+				number_of_blocks: 8001,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 100000,
+					number_of_blocks: 7999,
+					sync_complete: false,
+					short_channel_ids: (100000..=107999)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 107999,
+					number_of_blocks: 2,
+					sync_complete: true,
+					short_channel_ids: vec![
+						scid_from_parts(108000, 0, 0).unwrap(),
+					],
+				}
+			]
+		);
+
+		// Multiple split on same block
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100002,
+				number_of_blocks: 8000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 100002,
+					number_of_blocks: 7999,
+					sync_complete: false,
+					short_channel_ids: (100002..=108001)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 108001,
+					number_of_blocks: 1,
+					sync_complete: true,
+					short_channel_ids: vec![
+						scid_from_parts(108001, 1, 0).unwrap(),
+					],
+				}
+			]
+		);
+	}
+
+	fn do_handling_query_channel_range(
+		net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
+		test_node_id: &PublicKey,
+		msg: QueryChannelRange,
+		expected_ok: bool,
+		expected_replies: Vec<ReplyChannelRange>
+	) {
+		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+		let query_end_blocknum = msg.end_blocknum();
+		let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
+
+		if expected_ok {
+			assert!(result.is_ok());
+		} else {
+			assert!(result.is_err());
+		}
+
+		let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), expected_replies.len());
+
+		for i in 0..events.len() {
+			let expected_reply = &expected_replies[i];
+			match &events[i] {
+				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
+					assert_eq!(node_id, test_node_id);
+					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
+					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
+					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
+					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
+					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+					// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+					assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+					assert!(msg.first_blocknum >= max_firstblocknum);
+					max_firstblocknum = msg.first_blocknum;
+					c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+					// Check that the last block count is >= the query's end_blocknum
+					if i == events.len() - 1 {
+						assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+					}
+				},
+				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
+			}
+		}
 	}
 
 	#[test]
@@ -2117,3 +2460,30 @@ mod tests {
 		assert!(result.is_err());
 	}
 }
+
+#[cfg(all(test, feature = "unstable"))]
+mod benches {
+	use super::*;
+
+	use test::Bencher;
+	use std::io::Read;
+
+	#[bench]
+	fn read_network_graph(bench: &mut Bencher) {
+		let mut d = ::routing::router::test_utils::get_route_file().unwrap();
+		let mut v = Vec::new();
+		d.read_to_end(&mut v).unwrap();
+		bench.iter(|| {
+			let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v)).unwrap();
+		});
+	}
+
+	#[bench]
+	fn write_network_graph(bench: &mut Bencher) {
+		let mut d = ::routing::router::test_utils::get_route_file().unwrap();
+		let net_graph = NetworkGraph::read(&mut d).unwrap();
+		bench.iter(|| {
+			let _ = net_graph.encode();
+		});
+	}
+}
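Note on the SCID conversion used above: the patch's `scid_utils` helpers assume the BOLT 7 short_channel_id packing, which is why `handle_query_channel_range` caps `end_blocknum()` at `MAX_SCID_BLOCK` before converting a block height into a range bound. The following is a minimal, self-contained sketch of that packing, not rust-lightning's actual module; the `Result<u64, ()>` error type is a simplification for illustration.

// Sketch of the BOLT 7 short_channel_id layout: 3 bytes of funding block
// height, 3 bytes of transaction index within the block, 2 bytes of output
// index. Names mirror the helpers used in the patch but are reimplemented here.
const MAX_SCID_BLOCK: u64 = 0x00ff_ffff;
const MAX_SCID_TX_INDEX: u64 = 0x00ff_ffff;
const MAX_SCID_VOUT_INDEX: u64 = 0xffff;

fn scid_from_parts(block: u64, tx_index: u64, vout_index: u64) -> Result<u64, ()> {
	// Reject components that do not fit their fixed-width fields.
	if block > MAX_SCID_BLOCK || tx_index > MAX_SCID_TX_INDEX || vout_index > MAX_SCID_VOUT_INDEX {
		return Err(());
	}
	Ok((block << 40) | (tx_index << 16) | vout_index)
}

fn block_from_scid(scid: &u64) -> u32 {
	// The block height occupies the top 24 bits.
	(*scid >> 40) as u32
}

fn main() {
	let scid = scid_from_parts(108_001, 1, 0).unwrap();
	assert_eq!(block_from_scid(&scid), 108_001);
	// A query whose end block exceeds MAX_SCID_BLOCK cannot be converted
	// directly, which is why the handler caps it with cmp::min first.
	assert!(scid_from_parts(MAX_SCID_BLOCK + 1, 0, 0).is_err());
}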
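The reply-sequencing rules spelled out in the handler's comments, and checked by the test's `max_firstblocknum`/`c_lightning_0_9_prev_end_blocknum` bookkeeping, can also be expressed as a small standalone checker. This is a sketch under assumed semantics, not rust-lightning API; `Reply` and `replies_are_well_sequenced` are hypothetical names introduced only for this example.

// Checks the two invariants the patch maintains: each reply resumes exactly at
// (or one block past) the previous reply's end, satisfying c-lightning < 0.10,
// and the final reply covers through the query's end block, satisfying the spec.
struct Reply {
	first_blocknum: u32,
	number_of_blocks: u32,
}

fn replies_are_well_sequenced(query_first: u32, query_blocks: u32, replies: &[Reply]) -> bool {
	let query_end = query_first.saturating_add(query_blocks);
	let mut prev_end = query_first;
	for (i, r) in replies.iter().enumerate() {
		// Resume at the prior end block, or exactly one past it.
		if r.first_blocknum != prev_end && r.first_blocknum != prev_end.saturating_add(1) {
			return false;
		}
		prev_end = r.first_blocknum.saturating_add(r.number_of_blocks);
		// The last reply's end must reach the query's end block.
		if i == replies.len() - 1 && prev_end < query_end {
			return false;
		}
	}
	true
}

fn main() {
	// Mirrors the "Multiple split on new block" test case above: a query for
	// blocks [100000, 108001) answered by (100000, 7999) then (107999, 2).
	let replies = [
		Reply { first_blocknum: 100_000, number_of_blocks: 7_999 },
		Reply { first_blocknum: 107_999, number_of_blocks: 2 },
	];
	assert!(replies_are_well_sequenced(100_000, 8_001, &replies));
}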