Merge pull request #961 from TheBlueMatt/2021-06-workaround-broken-cln
[rust-lightning] / lightning / src / routing / network_graph.rs
index b8655fb0abd3d59bed177456d572dc2529733b1d..22cd2a92f6f6fba841de9f1cc01d86bf401552fb 100644 (file)
@@ -28,23 +28,27 @@ use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalFie
 use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use ln::msgs;
 use util::ser::{Writeable, Readable, Writer};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 
-use std::{cmp, fmt};
+use prelude::*;
+use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
+use core::{cmp, fmt};
 use std::sync::{RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Mutex;
-use std::collections::BTreeMap;
-use std::collections::btree_map::Entry as BtreeEntry;
-use std::ops::Deref;
+use core::ops::Deref;
 use bitcoin::hashes::hex::ToHex;
 
 /// The maximum number of extra bytes which we do not understand in a gossip message before we will
 /// refuse to relay the message.
 const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
 
+/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
+/// This value ensures a reply fits within the 65,535-byte message payload limit and is consistent with other implementations.
+const MAX_SCIDS_PER_REPLY: usize = 8000;
+
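
As a quick sanity check on the constant above (a sketch, not part of the patch): each short_channel_id serializes as a u64, so a full batch occupies 8000 * 8 = 64,000 bytes, comfortably under the 65,535-byte message payload limit with room left for the fixed reply_channel_range fields.

    // Sketch only: 8000 SCIDs at 8 bytes each fit one reply with headroom.
    const SCID_SIZE: usize = 8; // a short_channel_id serializes as a u64
    const MAX_BATCH_BYTES: usize = MAX_SCIDS_PER_REPLY * SCID_SIZE; // = 64_000 < 65_535
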
 /// Represents the network as nodes and channels between them
 #[derive(Clone, PartialEq)]
 pub struct NetworkGraph {
@@ -71,11 +75,6 @@ pub struct NetGraphMsgHandler<C: Deref, L: Deref> where C::Target: chain::Access
        full_syncs_requested: AtomicUsize,
        pending_events: Mutex<Vec<MessageSendEvent>>,
        logger: L,
-
-       /// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
-       /// Default is 8000 which ensures a reply fits within the 65k payload limit and is
-       /// consistent with other implementations.
-       max_reply_scids: usize,
 }
 
 impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
@@ -92,7 +91,6 @@ impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access
                        chain_access,
                        pending_events: Mutex::new(vec![]),
                        logger,
-                       max_reply_scids: 8000,
                }
        }
 
@@ -106,7 +104,6 @@ impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access
                        chain_access,
                        pending_events: Mutex::new(vec![]),
                        logger,
-                       max_reply_scids: 8000,
                }
        }
 
@@ -155,7 +152,7 @@ macro_rules! secp_verify_sig {
        };
 }
 
-impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
+impl<C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
        fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
                self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?;
                Ok(msg.contents.excess_data.len() <=  MAX_EXCESS_BYTES_FOR_RELAY &&
@@ -172,12 +169,16 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
        fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) {
                match update {
                        &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => {
+                               let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
+                               log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" });
                                let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx);
                        },
                        &msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => {
+                               log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id);
                                self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent);
                        },
                        &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => {
+                               log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id);
                                self.network_graph.write().unwrap().fail_node(node_id, is_permanent);
                        },
                }
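
For context on the flag test above: in a channel_update, bit 1 of `flags` is the disable bit (bit 0 selects the direction), so the channel is enabled exactly when that bit is clear. A minimal illustration with hypothetical flag values:

    let flags: u8 = 0b10;                  // disable bit (1 << 1) set
    assert_eq!(flags & (1 << 1), 1 << 1);  // channel disabled
    let flags: u8 = 0b01;                  // only the direction bit set
    assert_ne!(flags & (1 << 1), 1 << 1);  // channel enabled
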
@@ -320,23 +321,26 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                Ok(())
        }
 
-       /// Processes a query from a peer by finding channels whose funding UTXOs
+       /// Processes a query from a peer by finding announced/public channels whose funding UTXOs
        /// are in the specified block range. Due to message size limits, large range
        /// queries may result in several reply messages. This implementation enqueues
-       /// all reply messages into pending events.
+       /// all reply messages into pending events. Each message will allocate just under 65KiB. A full
+       /// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
+       /// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
+       /// memory-constrained systems.
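
Making the doc comment's arithmetic explicit (a sketch under its stated assumptions of 128k public channels and 8000 SCIDs per reply):

    let channels: usize = 128_000;
    let replies = (channels + MAX_SCIDS_PER_REPLY - 1) / MAX_SCIDS_PER_REPLY; // = 16 messages
    let scid_bytes = channels * 8;                                            // = 1_024_000, ~1MB
    assert_eq!(replies, 16);
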
        fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> {
                log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks);
 
                let network_graph = self.network_graph.read().unwrap();
 
-               let start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
+               let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
 
-               // We receive valid queries with end_blocknum that would overflow SCID conversion.
-               // Manually cap the ending block to avoid this overflow.
+               // We might receive valid queries with end_blocknum that would overflow SCID conversion.
+               // If so, we manually cap the ending block to avoid this overflow.
                let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
 
                // Per spec, we must reply to a query. Send an empty message when things are invalid.
-               if msg.chain_hash != network_graph.genesis_hash || start_scid.is_err() || exclusive_end_scid.is_err() {
+               if msg.chain_hash != network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 {
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push(MessageSendEvent::SendReplyChannelRange {
                                node_id: their_node_id.clone(),
@@ -348,18 +352,21 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                                        short_channel_ids: vec![],
                                }
                        });
-                       return Ok(());
+                       return Err(LightningError {
+                               err: String::from("query_channel_range could not be processed"),
+                               action: ErrorAction::IgnoreError,
+                       });
                }
 
                // Creates channel batches. We are not checking if the channel is routable
                // (has at least one update). A peer may still want to know the channel
                // exists even if it's not yet routable.
-               let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(self.max_reply_scids)];
-               for (_, ref chan) in network_graph.get_channels().range(start_scid.unwrap()..exclusive_end_scid.unwrap()) {
+               let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
+               for (_, ref chan) in network_graph.get_channels().range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
                        if let Some(chan_announcement) = &chan.announcement_message {
                                // Construct a new batch if last one is full
                                if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
-                                       batches.push(Vec::with_capacity(self.max_reply_scids));
+                                       batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
                                }
 
                                let batch = batches.last_mut().unwrap();
@@ -369,35 +376,40 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                drop(network_graph);
 
                let mut pending_events = self.pending_events.lock().unwrap();
-               let mut batch_index = 0;
                let batch_count = batches.len();
-               for batch in batches.into_iter() {
-                       // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum.
-                       // Use the query's values since we don't use pre-processed reply ranges.
-                       let first_blocknum = if batch_index == 0 {
-                               msg.first_blocknum
-                       }
-                       // Subsequent replies must be >= the last sent first_blocknum. Use the first block
-                       // in the new batch.
-                       else {
-                               block_from_scid(batch.first().unwrap())
-                       };
-
-                       // Per spec, the last end_block needs to be >= the query's end_block. Last
-                       // reply calculates difference between the query's end_blocknum and the start of the reply.
-                       // Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and first_blocknum
-                       // will be either msg.first_blocknum or a higher block height.
-                       let number_of_blocks = if batch_index == batch_count-1 {
-                               msg.end_blocknum() - first_blocknum
+               let mut prev_batch_endblock = msg.first_blocknum;
+               for (batch_index, batch) in batches.into_iter().enumerate() {
+                       // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
+                       // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
+                       //
+                       // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
+                       // reply is >= the previous reply's `first_blocknum` and either exactly the previous
+                       // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
+                       // significant diversion from the requirements set by the spec, and, in case of blocks
+                       // with no channel opens (e.g. empty blocks), requires that we use the previous value
+                       // and *not* derive the first_blocknum from the actual first block of the reply.
+                       let first_blocknum = prev_batch_endblock;
+
+                       // Each message carries the number of blocks (from the `first_blocknum`) its contents
+                       // fit in. There is no requirement that we use exactly that many blocks, except for
+                       // the bogus c-lightning requirements described above.
+                       //
+                       // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
+                       // >= the query's end block. Thus, for the last reply, we calculate the difference
+                       // between the query's end block and the start of the reply.
+                       //
+                       // Overflow safe since end_blocknum=msg.first_blocknum+msg.number_of_blocks and
+                       // first_blocknum will be either msg.first_blocknum or a higher block height.
+                       let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+                               (true, msg.end_blocknum() - first_blocknum)
                        }
                        // Prior replies should use the number of blocks that fit into the reply. Overflow
                        // safe since first_blocknum is always <= last SCID's block.
                        else {
-                               block_from_scid(batch.last().unwrap()) - first_blocknum + 1
+                               (false, block_from_scid(batch.last().unwrap()) - first_blocknum)
                        };
 
-                       // Only true for the last message in a sequence
-                       let sync_complete = batch_index == batch_count - 1;
+                       prev_batch_endblock = first_blocknum + number_of_blocks;
 
                        pending_events.push(MessageSendEvent::SendReplyChannelRange {
                                node_id: their_node_id.clone(),
@@ -409,8 +421,6 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                                        short_channel_ids: batch,
                                }
                        });
-
-                       batch_index += 1;
                }
 
                Ok(())
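
To see how the loop above satisfies both the spec and the stricter c-lightning < 0.10 checks, consider this standalone sketch of the range arithmetic (a hypothetical helper, not the library API), traced against the 8001-block query exercised in the tests below:

    /// Given the query bounds and the last block covered by each batch, produce
    /// (first_blocknum, number_of_blocks, sync_complete) per reply, chaining each
    /// reply's first_blocknum to the prior reply's end block.
    fn reply_ranges(query_first: u32, query_end: u32, batch_last_blocks: &[u32]) -> Vec<(u32, u32, bool)> {
        let mut prev_end = query_first;
        let count = batch_last_blocks.len();
        batch_last_blocks.iter().enumerate().map(|(i, &last_block)| {
            let first = prev_end; // contiguous with the prior reply, as c-lightning < 0.10 requires
            let last_reply = i == count - 1;
            // The last reply must cover through the query's end block; earlier replies
            // cover up to (but not including) the block their batch ends on.
            let number = if last_reply { query_end - first } else { last_block - first };
            prev_end = first + number;
            (first, number, last_reply)
        }).collect()
    }

    #[test]
    fn chained_ranges_stay_contiguous() {
        // Two batches: the first ends on block 107999, the second on 108000.
        assert_eq!(
            reply_ranges(100_000, 108_001, &[107_999, 108_000]),
            vec![(100_000, 7_999, false), (107_999, 2, true)],
        );
    }
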
@@ -433,7 +443,7 @@ where
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let mut ret = Vec::new();
                let mut pending_events = self.pending_events.lock().unwrap();
-               std::mem::swap(&mut ret, &mut pending_events);
+               core::mem::swap(&mut ret, &mut pending_events);
                ret
        }
 }
@@ -469,14 +479,14 @@ impl fmt::Display for DirectionalChannelInfo {
        }
 }
 
-impl_writeable!(DirectionalChannelInfo, 0, {
-       last_update,
-       enabled,
-       cltv_expiry_delta,
-       htlc_minimum_msat,
-       htlc_maximum_msat,
-       fees,
-       last_update_message
+impl_writeable_tlv_based!(DirectionalChannelInfo, {
+       (0, last_update, required),
+       (2, enabled, required),
+       (4, cltv_expiry_delta, required),
+       (6, htlc_minimum_msat, required),
+       (8, htlc_maximum_msat, required),
+       (10, fees, required),
+       (12, last_update_message, required),
 });
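
For readers new to the macro: `impl_writeable_tlv_based!` replaces the positional `impl_writeable!` encoding with a stream of type-length-value records, keyed by the even type numbers on the left, with the third element marking each field as `required`, `option`, or a length-prefixed `vec_type`. The shape, on a hypothetical struct:

    // Hypothetical illustration, not a struct in this file:
    struct Example { a: u32, b: Option<u64> }
    impl_writeable_tlv_based!(Example, {
        (0, a, required),  // TLV type 0, must be present
        (2, b, option),    // TLV type 2, omitted when None
    });
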
 
 #[derive(Clone, Debug, PartialEq)]
@@ -510,14 +520,14 @@ impl fmt::Display for ChannelInfo {
        }
 }
 
-impl_writeable!(ChannelInfo, 0, {
-       features,
-       node_one,
-       one_to_two,
-       node_two,
-       two_to_one,
-       capacity_sats,
-       announcement_message
+impl_writeable_tlv_based!(ChannelInfo, {
+       (0, features, required),
+       (2, node_one, required),
+       (4, one_to_two, required),
+       (6, node_two, required),
+       (8, two_to_one, required),
+       (10, capacity_sats, required),
+       (12, announcement_message, required),
 });
 
 
@@ -531,24 +541,10 @@ pub struct RoutingFees {
        pub proportional_millionths: u32,
 }
 
-impl Readable for RoutingFees{
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<RoutingFees, DecodeError> {
-               let base_msat: u32 = Readable::read(reader)?;
-               let proportional_millionths: u32 = Readable::read(reader)?;
-               Ok(RoutingFees {
-                       base_msat,
-                       proportional_millionths,
-               })
-       }
-}
-
-impl Writeable for RoutingFees {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               self.base_msat.write(writer)?;
-               self.proportional_millionths.write(writer)?;
-               Ok(())
-       }
-}
+impl_writeable_tlv_based!(RoutingFees, {
+       (0, base_msat, required),
+       (2, proportional_millionths, required)
+});
 
 #[derive(Clone, Debug, PartialEq)]
 /// Information received in the latest node_announcement from this node.
@@ -573,48 +569,14 @@ pub struct NodeAnnouncementInfo {
        pub announcement_message: Option<NodeAnnouncement>
 }
 
-impl Writeable for NodeAnnouncementInfo {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               self.features.write(writer)?;
-               self.last_update.write(writer)?;
-               self.rgb.write(writer)?;
-               self.alias.write(writer)?;
-               (self.addresses.len() as u64).write(writer)?;
-               for ref addr in &self.addresses {
-                       addr.write(writer)?;
-               }
-               self.announcement_message.write(writer)?;
-               Ok(())
-       }
-}
-
-impl Readable for NodeAnnouncementInfo {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NodeAnnouncementInfo, DecodeError> {
-               let features = Readable::read(reader)?;
-               let last_update = Readable::read(reader)?;
-               let rgb = Readable::read(reader)?;
-               let alias = Readable::read(reader)?;
-               let addresses_count: u64 = Readable::read(reader)?;
-               let mut addresses = Vec::with_capacity(cmp::min(addresses_count, MAX_ALLOC_SIZE / 40) as usize);
-               for _ in 0..addresses_count {
-                       match Readable::read(reader) {
-                               Ok(Ok(addr)) => { addresses.push(addr); },
-                               Ok(Err(_)) => return Err(DecodeError::InvalidValue),
-                               Err(DecodeError::ShortRead) => return Err(DecodeError::BadLengthDescriptor),
-                               _ => unreachable!(),
-                       }
-               }
-               let announcement_message = Readable::read(reader)?;
-               Ok(NodeAnnouncementInfo {
-                       features,
-                       last_update,
-                       rgb,
-                       alias,
-                       addresses,
-                       announcement_message
-               })
-       }
-}
+impl_writeable_tlv_based!(NodeAnnouncementInfo, {
+       (0, features, required),
+       (2, last_update, required),
+       (4, rgb, required),
+       (6, alias, required),
+       (8, announcement_message, option),
+       (10, addresses, vec_type),
+});
 
 #[derive(Clone, Debug, PartialEq)]
 /// Details about a node in the network, known from the network announcement.
@@ -639,39 +601,19 @@ impl fmt::Display for NodeInfo {
        }
 }
 
-impl Writeable for NodeInfo {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               (self.channels.len() as u64).write(writer)?;
-               for ref chan in self.channels.iter() {
-                       chan.write(writer)?;
-               }
-               self.lowest_inbound_channel_fees.write(writer)?;
-               self.announcement_info.write(writer)?;
-               Ok(())
-       }
-}
-
-const MAX_ALLOC_SIZE: u64 = 64*1024;
+impl_writeable_tlv_based!(NodeInfo, {
+       (0, lowest_inbound_channel_fees, option),
+       (2, announcement_info, option),
+       (4, channels, vec_type),
+});
 
-impl Readable for NodeInfo {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NodeInfo, DecodeError> {
-               let channels_count: u64 = Readable::read(reader)?;
-               let mut channels = Vec::with_capacity(cmp::min(channels_count, MAX_ALLOC_SIZE / 8) as usize);
-               for _ in 0..channels_count {
-                       channels.push(Readable::read(reader)?);
-               }
-               let lowest_inbound_channel_fees = Readable::read(reader)?;
-               let announcement_info = Readable::read(reader)?;
-               Ok(NodeInfo {
-                       channels,
-                       lowest_inbound_channel_fees,
-                       announcement_info,
-               })
-       }
-}
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
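
These constants feed the `write_ver_prefix!`/`read_ver_prefix!` pair used below: the writer emits the current and minimum-compatible version bytes, and the reader rejects input whose minimum required version it does not understand. A sketch of the check the read macro performs (an assumption based on the macro's role; see util::ser for the real definition):

    fn check_version(min_ver: u8) -> Result<(), DecodeError> {
        if min_ver > SERIALIZATION_VERSION {
            Err(DecodeError::UnknownVersion) // written by a future, incompatible version
        } else {
            Ok(()) // the version byte itself may be newer; the trailing TLV stream keeps it forward-compatible
        }
    }
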
 
 impl Writeable for NetworkGraph {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+               write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+
                self.genesis_hash.write(writer)?;
                (self.channels.len() as u64).write(writer)?;
                for (ref chan_id, ref chan_info) in self.channels.iter() {
@@ -683,12 +625,16 @@ impl Writeable for NetworkGraph {
                        node_id.write(writer)?;
                        node_info.write(writer)?;
                }
+
+               write_tlv_fields!(writer, {});
                Ok(())
        }
 }
 
 impl Readable for NetworkGraph {
        fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
+               let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
+
                let genesis_hash: BlockHash = Readable::read(reader)?;
                let channels_count: u64 = Readable::read(reader)?;
                let mut channels = BTreeMap::new();
@@ -704,6 +650,8 @@ impl Readable for NetworkGraph {
                        let node_info = Readable::read(reader)?;
                        nodes.insert(node_id, node_info);
                }
+               read_tlv_fields!(reader, {});
+
                Ok(NetworkGraph {
                        genesis_hash,
                        channels,
@@ -785,7 +733,7 @@ impl NetworkGraph {
                        Some(node) => {
                                if let Some(node_info) = node.announcement_info.as_ref() {
                                        if node_info.last_update  >= msg.timestamp {
-                                               return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+                                               return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
                                        }
                                }
 
@@ -906,7 +854,7 @@ impl NetworkGraph {
                                        Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id);
                                        *entry.get_mut() = chan_info;
                                } else {
-                                       return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError})
+                                       return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)})
                                }
                        },
                        BtreeEntry::Vacant(entry) => {
@@ -1008,7 +956,7 @@ impl NetworkGraph {
                                        ( $target: expr, $src_node: expr) => {
                                                if let Some(existing_chan_info) = $target.as_ref() {
                                                        if existing_chan_info.last_update >= msg.timestamp {
-                                                               return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+                                                               return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
                                                        }
                                                        chan_was_enabled = existing_chan_info.enabled;
                                                } else {
@@ -1124,6 +1072,7 @@ mod tests {
        use util::logger::Logger;
        use util::ser::{Readable, Writeable};
        use util::events::{MessageSendEvent, MessageSendEventsProvider};
+       use util::scid_utils::scid_from_parts;
 
        use bitcoin::hashes::sha256d::Hash as Sha256dHash;
        use bitcoin::hashes::Hash;
@@ -1138,6 +1087,7 @@ mod tests {
        use bitcoin::secp256k1::key::{PublicKey, SecretKey};
        use bitcoin::secp256k1::{All, Secp256k1};
 
+       use prelude::*;
        use std::sync::Arc;
 
        fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>) {
@@ -2184,7 +2134,7 @@ mod tests {
 
        #[test]
        fn handling_query_channel_range() {
-               let (secp_ctx, mut net_graph_msg_handler) = create_net_graph_msg_handler();
+               let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
 
                let chain_hash = genesis_block(Network::Testnet).header.block_hash();
                let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
@@ -2196,17 +2146,19 @@ mod tests {
                let bitcoin_key_1 = PublicKey::from_secret_key(&secp_ctx, node_1_btckey);
                let bitcoin_key_2 = PublicKey::from_secret_key(&secp_ctx, node_2_btckey);
 
-               let scids: Vec<u64> = vec![
-                       0x000000_000000_0000, // 0x0x0
-                       0x000001_000000_0000, // 1x0x0
-                       0x000002_000000_0000, // 2x0x0
-                       0x000002_000001_0000, // 2x1x0
-                       0x000100_000000_0000, // 256x0x0
-                       0x000101_000000_0000, // 257x0x0
-                       0xfffffe_ffffff_ffff, // max
-                       0xffffff_ffffff_ffff, // never
+               let mut scids: Vec<u64> = vec![
+                       scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
+                       scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
                ];
 
+               // used for testing multipart reply across blocks
+               for block in 100000..=108001 {
+                       scids.push(scid_from_parts(block, 0, 0).unwrap());
+               }
+
+               // used for testing resumption on same block
+               scids.push(scid_from_parts(108001, 1, 0).unwrap());
+
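
The `scid_from_parts` calls replace the raw hex constants from the old test. An SCID packs block height, transaction index, and output index into 24, 24, and 16 bits respectively, so the "max" entry above is just the old literal written out:

    assert_eq!(scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), 0xfffffe_ffffff_ffff);
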
                for scid in scids {
                        let unsigned_announcement = UnsignedChannelAnnouncement {
                                features: ChannelFeatures::known(),
@@ -2233,8 +2185,8 @@ mod tests {
                        };
                }
 
-               // Empty reply when number_of_blocks=0
-               test_handling_query_channel_range(
+               // Error when number_of_blocks=0
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
@@ -2242,6 +2194,7 @@ mod tests {
                                first_blocknum: 0,
                                number_of_blocks: 0,
                        },
+                       false,
                        vec![ReplyChannelRange {
                                chain_hash: chain_hash.clone(),
                                first_blocknum: 0,
@@ -2251,8 +2204,8 @@ mod tests {
                        }]
                );
 
-               // Empty when wrong chain
-               test_handling_query_channel_range(
+               // Error when wrong chain
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
@@ -2260,6 +2213,7 @@ mod tests {
                                first_blocknum: 0,
                                number_of_blocks: 0xffff_ffff,
                        },
+                       false,
                        vec![ReplyChannelRange {
                                chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
                                first_blocknum: 0,
@@ -2269,27 +2223,27 @@ mod tests {
                        }]
                );
 
-               // Empty reply when first_blocknum > 0xffffff
-               test_handling_query_channel_range(
+               // Error when first_blocknum > 0xffffff
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
                                chain_hash: chain_hash.clone(),
                                first_blocknum: 0x01000000,
-                               number_of_blocks: 0xffffffff,
+                               number_of_blocks: 0xffff_ffff,
                        },
+                       false,
                        vec![ReplyChannelRange {
                                chain_hash: chain_hash.clone(),
                                first_blocknum: 0x01000000,
-                               number_of_blocks: 0xffffffff,
+                               number_of_blocks: 0xffff_ffff,
                                sync_complete: true,
                                short_channel_ids: vec![]
                        }]
                );
 
-               // Empty reply when max valid SCID block num.
-               // Unlike prior test this is a valid but no results are found
-               test_handling_query_channel_range(
+               // Empty reply when max valid SCID block num
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
@@ -2297,6 +2251,7 @@ mod tests {
                                first_blocknum: 0xffffff,
                                number_of_blocks: 1,
                        },
+                       true,
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
@@ -2309,18 +2264,19 @@ mod tests {
                );
 
                // No results in valid query range
-               test_handling_query_channel_range(
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
                                chain_hash: chain_hash.clone(),
-                               first_blocknum: 0x00100000,
+                               first_blocknum: 1000,
                                number_of_blocks: 1000,
                        },
+                       true,
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0x00100000,
+                                       first_blocknum: 1000,
                                        number_of_blocks: 1000,
                                        sync_complete: true,
                                        short_channel_ids: vec![],
@@ -2328,189 +2284,134 @@ mod tests {
                        ]
                );
 
-               // Single reply - all blocks
-               test_handling_query_channel_range(
-                       &net_graph_msg_handler,
-                       &node_id_2,
-                       QueryChannelRange {
-                               chain_hash: chain_hash.clone(),
-                               first_blocknum: 0,
-                               number_of_blocks: 0xffffffff,
-                       },
-                       vec![
-                               ReplyChannelRange {
-                                       chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 0xffffffff,
-                                       sync_complete: true,
-                                       short_channel_ids: vec![
-                                               0x000000_000000_0000, // 0x0x0
-                                               0x000001_000000_0000, // 1x0x0
-                                               0x000002_000000_0000, // 2x0x0
-                                               0x000002_000001_0000, // 2x1x0
-                                               0x000100_000000_0000, // 256x0x0
-                                               0x000101_000000_0000, // 257x0x0
-                                               0xfffffe_ffffff_ffff, // max
-                                       ]
-                               }
-                       ]
-               );
-
-               // Single reply - overflow of first_blocknum + number_of_blocks
-               test_handling_query_channel_range(
+               // Overflow first_blocknum + number_of_blocks
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
                                chain_hash: chain_hash.clone(),
-                               first_blocknum: 1,
+                               first_blocknum: 0xfe0000,
                                number_of_blocks: 0xffffffff,
                        },
+                       true,
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 1,
-                                       number_of_blocks: 0xfffffffe,
+                                       first_blocknum: 0xfe0000,
+                                       number_of_blocks: 0xffffffff - 0xfe0000,
                                        sync_complete: true,
                                        short_channel_ids: vec![
-                                               0x000001_000000_0000, // 1x0x0
-                                               0x000002_000000_0000, // 2x0x0
-                                               0x000002_000001_0000, // 2x1x0
-                                               0x000100_000000_0000, // 256x0x0
-                                               0x000101_000000_0000, // 257x0x0
                                                0xfffffe_ffffff_ffff, // max
                                        ]
                                }
                        ]
                );
 
-               // Single reply - query larger than found results
-               test_handling_query_channel_range(
+               // Single block exactly full
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
                                chain_hash: chain_hash.clone(),
-                               first_blocknum: 100,
-                               number_of_blocks: 1000,
+                               first_blocknum: 100000,
+                               number_of_blocks: 8000,
                        },
+                       true,
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 100,
-                                       number_of_blocks: 1000,
-                                       sync_complete: true,
-                                       short_channel_ids: vec![
-                                               0x000100_000000_0000, // 256x0x0
-                                               0x000101_000000_0000, // 257x0x0
-                                       ]
-                               }
-                       ]
-               );
-
-               // Tests below here will chunk replies
-               net_graph_msg_handler.max_reply_scids = 1;
-
-               // Multipart - new block per messages
-               test_handling_query_channel_range(
-                       &net_graph_msg_handler,
-                       &node_id_2,
-                       QueryChannelRange {
-                               chain_hash: chain_hash.clone(),
-                               first_blocknum: 0,
-                               number_of_blocks: 2,
-                       },
-                       vec![
-                               ReplyChannelRange {
-                                       chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 1,
-                                       sync_complete: false,
-                                       short_channel_ids: vec![
-                                               0x000000_000000_0000, // 0x0x0
-                                       ]
-                               },
-                               ReplyChannelRange {
-                                       chain_hash: chain_hash.clone(),
-                                       first_blocknum: 1,
-                                       number_of_blocks: 1,
+                                       first_blocknum: 100000,
+                                       number_of_blocks: 8000,
                                        sync_complete: true,
-                                       short_channel_ids: vec![
-                                               0x000001_000000_0000, // 1x0x0
-                                       ]
+                                       short_channel_ids: (100000..=107999)
+                                               .map(|block| scid_from_parts(block, 0, 0).unwrap())
+                                               .collect(),
                                },
                        ]
                );
 
-               // Multiplart - resumption of same block
-               test_handling_query_channel_range(
+               // Multipart reply - split on a new block
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
                                chain_hash: chain_hash.clone(),
-                               first_blocknum: 2,
-                               number_of_blocks: 1,
+                               first_blocknum: 100000,
+                               number_of_blocks: 8001,
                        },
+                       true,
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 2,
-                                       number_of_blocks: 1,
+                                       first_blocknum: 100000,
+                                       number_of_blocks: 7999,
                                        sync_complete: false,
-                                       short_channel_ids: vec![
-                                               0x000002_000000_0000, // 2x0x0
-                                       ]
+                                       short_channel_ids: (100000..=107999)
+                                               .map(|block| scid_from_parts(block, 0, 0).unwrap())
+                                               .collect(),
                                },
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 2,
-                                       number_of_blocks: 1,
+                                       first_blocknum: 107999,
+                                       number_of_blocks: 2,
                                        sync_complete: true,
                                        short_channel_ids: vec![
-                                               0x000002_000001_0000, // 2x1x0
-                                       ]
+                                               scid_from_parts(108000, 0, 0).unwrap(),
+                                       ],
                                }
                        ]
                );
 
-               // Multipart - query larger than found results, similar to single reply
-               test_handling_query_channel_range(
+               // Multipart reply - resumption within the same block
+               do_handling_query_channel_range(
                        &net_graph_msg_handler,
                        &node_id_2,
                        QueryChannelRange {
                                chain_hash: chain_hash.clone(),
-                               first_blocknum: 100,
-                               number_of_blocks: 1000,
+                               first_blocknum: 100002,
+                               number_of_blocks: 8000,
                        },
+                       true,
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 100,    // <=  query first_blocknum
-                                       number_of_blocks: 157,
+                                       first_blocknum: 100002,
+                                       number_of_blocks: 7999,
                                        sync_complete: false,
-                                       short_channel_ids: vec![
-                                               0x000100_000000_0000, // 256x0x0
-                                       ]
+                                       short_channel_ids: (100002..=108001)
+                                               .map(|block| scid_from_parts(block, 0, 0).unwrap())
+                                               .collect(),
                                },
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 257,
-                                       number_of_blocks: 843, // >= query first_blocknum+number_of_blocks
+                                       first_blocknum: 108001,
+                                       number_of_blocks: 1,
                                        sync_complete: true,
                                        short_channel_ids: vec![
-                                               0x000101_000000_0000, // 257x0x0
-                                       ]
+                                               scid_from_parts(108001, 1, 0).unwrap(),
+                                       ],
                                }
                        ]
                );
        }
 
-       fn test_handling_query_channel_range(
+       fn do_handling_query_channel_range(
                net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
                test_node_id: &PublicKey,
                msg: QueryChannelRange,
+               expected_ok: bool,
                expected_replies: Vec<ReplyChannelRange>
        ) {
+               let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+               let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+               let query_end_blocknum = msg.end_blocknum();
                let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
-               assert!(result.is_ok());
+
+               if expected_ok {
+                       assert!(result.is_ok());
+               } else {
+                       assert!(result.is_err());
+               }
 
                let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), expected_replies.len());
@@ -2525,6 +2426,17 @@ mod tests {
                                        assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
                                        assert_eq!(msg.sync_complete, expected_reply.sync_complete);
                                        assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+                                       // Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+                                       assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+                                       assert!(msg.first_blocknum >= max_firstblocknum);
+                                       max_firstblocknum = msg.first_blocknum;
+                                       c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+                                       // Check that the last reply's end block (first_blocknum + number_of_blocks) is >= the query's end_blocknum
+                                       if i == events.len() - 1 {
+                                               assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+                                       }
                                },
                                _ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
                        }
@@ -2546,3 +2458,30 @@ mod tests {
                assert!(result.is_err());
        }
 }
+
+#[cfg(all(test, feature = "unstable"))]
+mod benches {
+       use super::*;
+
+       use test::Bencher;
+       use std::io::Read;
+
+       #[bench]
+       fn read_network_graph(bench: &mut Bencher) {
+               let mut d = ::routing::router::test_utils::get_route_file().unwrap();
+               let mut v = Vec::new();
+               d.read_to_end(&mut v).unwrap();
+               bench.iter(|| {
+                       let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v)).unwrap();
+               });
+       }
+
+       #[bench]
+       fn write_network_graph(bench: &mut Bencher) {
+               let mut d = ::routing::router::test_utils::get_route_file().unwrap();
+               let net_graph = NetworkGraph::read(&mut d).unwrap();
+               bench.iter(|| {
+                       let _ = net_graph.encode();
+               });
+       }
+}