X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=16e0a978a91802c24dbb9ed1db49f960b5be0725;hb=5b59178f894b2bc597f19bab8727b4391cde7b87;hp=e11a53b769183cf7bec0bb81c1510cb6565505f7;hpb=7815965bf758dbcf8edf7beac5e4a95291ae7a3e;p=rust-lightning

diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs
index e11a53b76..16e0a978a 100644
--- a/lightning/src/routing/network_graph.rs
+++ b/lightning/src/routing/network_graph.rs
@@ -32,13 +32,13 @@ use util::logger::Logger;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 
-use std::{cmp, fmt};
+use core::{cmp, fmt};
 use std::sync::{RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Mutex;
 use std::collections::BTreeMap;
 use std::collections::btree_map::Entry as BtreeEntry;
-use std::ops::Deref;
+use core::ops::Deref;
 use bitcoin::hashes::hex::ToHex;
 
 /// The maximum number of extra bytes which we do not understand in a gossip message before we will
@@ -152,7 +152,7 @@ macro_rules! secp_verify_sig {
 	};
 }
 
-impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
+impl<C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
 	fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
 		self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?;
 		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
@@ -374,28 +374,18 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
 		let mut pending_events = self.pending_events.lock().unwrap();
 		let batch_count = batches.len();
 		for (batch_index, batch) in batches.into_iter().enumerate() {
-			// Per spec, the initial first_blocknum needs to be <= the query's first_blocknum.
-			// Use the query's values since we don't use pre-processed reply ranges.
-			let first_blocknum = if batch_index == 0 {
-				msg.first_blocknum
-			}
-			// Subsequent replies must be >= the last sent first_blocknum. Use the first block
-			// in the new batch. Batches beyond the first one cannot be empty.
-			else {
-				block_from_scid(batch.first().unwrap())
-			};
-
-			// Per spec, the last end_blocknum needs to be >= the query's end_blocknum. Last
-			// reply calculates difference between the query's end_blocknum and the start of the reply.
-			// Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and first_blocknum
-			// will be either msg.first_blocknum or a higher block height.
+			// Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and
+			// subsequent replies must be >= the prior reply's. We'll simplify this by using zero,
+			// which is still spec-compliant now that sequence completion is signaled explicitly.
+			let first_blocknum = 0;
+
+			// Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
+			// query's value. Prior batches must use the number of blocks that fit into the message. We'll
+			// base this off the last SCID in the batch since we're somewhat abusing first_blocknum.
 			let number_of_blocks = if batch_index == batch_count-1 {
-				msg.end_blocknum() - first_blocknum
-			}
-			// Prior replies should use the number of blocks that fit into the reply. Overflow
-			// safe since first_blocknum is always <= last SCID's block.
-			else {
-				block_from_scid(batch.last().unwrap()) - first_blocknum + 1
+				msg.end_blocknum()
+			} else {
+				block_from_scid(batch.last().unwrap()) + 1
 			};
 
 			// Only true for the last message in a sequence
@@ -433,7 +423,7 @@ where
 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
-		std::mem::swap(&mut ret, &mut pending_events);
+		core::mem::swap(&mut ret, &mut pending_events);
 		ret
 	}
 }
@@ -670,8 +660,13 @@ impl Readable for NodeInfo {
 	}
 }
 
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
 impl Writeable for NetworkGraph {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+
 		self.genesis_hash.write(writer)?;
 		(self.channels.len() as u64).write(writer)?;
 		for (ref chan_id, ref chan_info) in self.channels.iter() {
@@ -683,12 +678,16 @@ impl Writeable for NetworkGraph {
 			node_id.write(writer)?;
 			node_info.write(writer)?;
 		}
+
+		write_tlv_fields!(writer, {}, {});
 		Ok(())
 	}
 }
 
 impl Readable for NetworkGraph {
 	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
+		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
+
 		let genesis_hash: BlockHash = Readable::read(reader)?;
 		let channels_count: u64 = Readable::read(reader)?;
 		let mut channels = BTreeMap::new();
@@ -704,6 +703,8 @@ impl Readable for NetworkGraph {
 			let node_info = Readable::read(reader)?;
 			nodes.insert(node_id, node_info);
 		}
+		read_tlv_fields!(reader, {}, {});
+
 		Ok(NetworkGraph {
 			genesis_hash,
 			channels,
@@ -2209,7 +2210,6 @@ mod tests {
 
 		// used for testing resumption on same block
 		scids.push(scid_from_parts(108001, 1, 0).unwrap());
-		scids.push(scid_from_parts(108001, 2, 0).unwrap());
 
 		for scid in scids {
 			let unsigned_announcement = UnsignedChannelAnnouncement {
@@ -2307,8 +2307,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0xffffff,
-					number_of_blocks: 1,
+					first_blocknum: 0,
+					number_of_blocks: 0x01000000,
 					sync_complete: true,
 					short_channel_ids: vec![]
 				},
@@ -2321,15 +2321,15 @@ mod tests {
 			&node_id_2,
 			QueryChannelRange {
 				chain_hash: chain_hash.clone(),
-				first_blocknum: 0x00800000,
+				first_blocknum: 1000,
 				number_of_blocks: 1000,
 			},
 			true,
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0x00800000,
-					number_of_blocks: 1000,
+					first_blocknum: 0,
+					number_of_blocks: 2000,
 					sync_complete: true,
 					short_channel_ids: vec![],
 				}
 			]
@@ -2349,8 +2349,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0xfe0000,
-					number_of_blocks: 0xffffffff - 0xfe0000,
+					first_blocknum: 0,
+					number_of_blocks: 0xffffffff,
 					sync_complete: true,
 					short_channel_ids: vec![
 						0xfffffe_ffffff_ffff, // max
@@ -2372,8 +2372,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 100000,
-					number_of_blocks: 8000,
+					first_blocknum: 0,
+					number_of_blocks: 108000,
 					sync_complete: true,
 					short_channel_ids: (100000..=107999)
 						.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2395,8 +2395,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 100000,
-					number_of_blocks: 8000,
+					first_blocknum: 0,
+					number_of_blocks: 108000,
 					sync_complete: false,
 					short_channel_ids: (100000..=107999)
 						.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2404,8 +2404,8 @@ mod tests {
 				},
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 108000,
-					number_of_blocks: 1,
+					first_blocknum: 0,
+					number_of_blocks: 108001,
 					sync_complete: true,
 					short_channel_ids: vec![
 						scid_from_parts(108000, 0, 0).unwrap(),
@@ -2427,8 +2427,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 100002,
-					number_of_blocks: 8000,
+					first_blocknum: 0,
+					number_of_blocks: 108002,
 					sync_complete: false,
 					short_channel_ids: (100002..=108001)
 						.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2436,12 +2436,11 @@ mod tests {
 				},
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 108001,
-					number_of_blocks: 1,
+					first_blocknum: 0,
+					number_of_blocks: 108002,
 					sync_complete: true,
 					short_channel_ids: vec![
 						scid_from_parts(108001, 1, 0).unwrap(),
-						scid_from_parts(108001, 2, 0).unwrap(),
 					],
 				}
 			]
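
Reviewer sketch: the reply-range arithmetic above, taken in isolation, shows why the updated
tests expect (first_blocknum, number_of_blocks) pairs of (0, 108000) and then (0, 108001).
This is a minimal, hypothetical illustration, not rust-lightning API: reply_ranges, its tuple
return, and the toy batches are invented for the example; block_from_scid mirrors the crate
helper, assuming the standard SCID layout with the block height in the upper three bytes.

    // Block height occupies the top 3 bytes of a short channel ID (SCID).
    fn block_from_scid(scid: &u64) -> u32 {
        (*scid >> 40) as u32
    }

    // Mirrors the simplified logic: every reply advertises first_blocknum = 0 (the spec
    // only requires it be <= the query's first_blocknum); a non-final reply covers
    // blocks up through its last SCID's block (hence the + 1); the final reply
    // stretches to the query's end_blocknum.
    fn reply_ranges(batches: &[Vec<u64>], query_end_blocknum: u32) -> Vec<(u32, u32)> {
        let batch_count = batches.len();
        batches.iter().enumerate().map(|(batch_index, batch)| {
            let first_blocknum = 0u32;
            let number_of_blocks = if batch_index == batch_count - 1 {
                query_end_blocknum
            } else {
                block_from_scid(batch.last().unwrap()) + 1
            };
            (first_blocknum, number_of_blocks)
        }).collect()
    }

    fn main() {
        let scid = |block: u64| block << 40;
        // Two batches, as in the multipart-reply test: blocks 100000..=107999, then 108000.
        let batches = vec![
            (100_000..108_000).map(scid).collect::<Vec<_>>(),
            vec![scid(108_000)],
        ];
        assert_eq!(reply_ranges(&batches, 108_001), vec![(0, 108_000), (0, 108_001)]);
    }

The trade-off the new comments describe is visible here: first_blocknum no longer encodes
resumption state, so a receiver must rely on the explicit sync_complete flag rather than on
contiguous block ranges to detect the end of a reply sequence.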