X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=9e4813e9155926fac85b20d307af1bb9ac8a1dc8;hb=002a5db5b07fd317545d3aa4f1a217cd40b608e2;hp=8754656f40551250070bbf4fd6042b9c714ba2b4;hpb=3a57cfc7c68f9e0e92a19b6803d3e9dac9e75678;p=rust-lightning diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 8754656f..9e4813e9 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -1,3 +1,12 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + //! The top-level network map tracking logic lives here. use bitcoin::secp256k1::key::PublicKey; @@ -7,67 +16,130 @@ use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::blockdata::script::Builder; +use bitcoin::blockdata::transaction::TxOut; use bitcoin::blockdata::opcodes; +use bitcoin::hash_types::BlockHash; -use chain::chaininterface::{ChainError, ChainWatchInterface}; +use chain; +use chain::Access; use ln::features::{ChannelFeatures, NodeFeatures}; -use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, OptionalField}; +use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; +use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField}; +use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; -use util::logger::Logger; - -use std::{cmp, fmt}; -use std::sync::RwLock; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::collections::BTreeMap; -use std::collections::btree_map::Entry as BtreeEntry; -use std::ops::Deref; +use util::logger::{Logger, Level}; +use util::events::{MessageSendEvent, MessageSendEventsProvider}; +use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; + +use prelude::*; +use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry}; +use core::{cmp, fmt}; +use sync::{RwLock, RwLockReadGuard}; +use core::sync::atomic::{AtomicUsize, Ordering}; +use sync::Mutex; +use core::ops::Deref; use bitcoin::hashes::hex::ToHex; +/// The maximum number of extra bytes which we do not understand in a gossip message before we will +/// refuse to relay the message. +const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024; + +/// Maximum number of short_channel_ids that will be encoded in one gossip reply message. +/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations. +const MAX_SCIDS_PER_REPLY: usize = 8000; + +/// Represents the network as nodes and channels between them +#[derive(Clone, PartialEq)] +pub struct NetworkGraph { + genesis_hash: BlockHash, + channels: BTreeMap, + nodes: BTreeMap, +} + +/// A simple newtype for RwLockReadGuard<'a, NetworkGraph>. +/// This exists only to make accessing a RwLock possible from +/// the C bindings, as it can be done directly in Rust code. +pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>); + /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. 
/// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. -pub struct NetGraphMsgHandler where C::Target: ChainWatchInterface, L::Target: Logger { +pub struct NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { secp_ctx: Secp256k1, /// Representation of the payment channel network pub network_graph: RwLock, - chain_monitor: C, + chain_access: Option, full_syncs_requested: AtomicUsize, + pending_events: Mutex>, logger: L, } -impl NetGraphMsgHandler where C::Target: ChainWatchInterface, L::Target: Logger { +impl NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming a fresh network graph. /// Chain monitor is used to make sure announced channels exist on-chain, /// channel data is correct, and that the announcement is signed with /// channel owners' keys. - pub fn new(chain_monitor: C, logger: L) -> Self { + pub fn new(genesis_hash: BlockHash, chain_access: Option, logger: L) -> Self { NetGraphMsgHandler { secp_ctx: Secp256k1::verification_only(), - network_graph: RwLock::new(NetworkGraph { - channels: BTreeMap::new(), - nodes: BTreeMap::new(), - }), + network_graph: RwLock::new(NetworkGraph::new(genesis_hash)), full_syncs_requested: AtomicUsize::new(0), - chain_monitor, + chain_access, + pending_events: Mutex::new(vec![]), logger, } } /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming an existing Network Graph. - pub fn from_net_graph(chain_monitor: C, logger: L, network_graph: NetworkGraph) -> Self { + pub fn from_net_graph(chain_access: Option, logger: L, network_graph: NetworkGraph) -> Self { NetGraphMsgHandler { secp_ctx: Secp256k1::verification_only(), network_graph: RwLock::new(network_graph), full_syncs_requested: AtomicUsize::new(0), - chain_monitor, + chain_access, + pending_events: Mutex::new(vec![]), logger, } } + + /// Adds a provider used to check new announcements. Does not affect + /// existing announcements unless they are updated. + /// Add, update or remove the provider would replace the current one. + pub fn add_chain_access(&mut self, chain_access: Option) { + self.chain_access = chain_access; + } + + /// Take a read lock on the network_graph and return it in the C-bindings + /// newtype helper. This is likely only useful when called via the C + /// bindings as you can call `self.network_graph.read().unwrap()` in Rust + /// yourself. + pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> { + LockedNetworkGraph(self.network_graph.read().unwrap()) + } + + /// Returns true when a full routing table sync should be performed with a peer. + fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { + //TODO: Determine whether to request a full sync based on the network map. + const FULL_SYNCS_TO_REQUEST: usize = 5; + if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST { + self.full_syncs_requested.fetch_add(1, Ordering::AcqRel); + true + } else { + false + } + } +} + +impl<'a> LockedNetworkGraph<'a> { + /// Get a reference to the NetworkGraph which this read-lock contains. + pub fn graph(&self) -> &NetworkGraph { + &*self.0 + } } @@ -80,65 +152,44 @@ macro_rules! 
secp_verify_sig { }; } -impl RoutingMessageHandler for NetGraphMsgHandler where C::Target: ChainWatchInterface, L::Target: Logger { +impl RoutingMessageHandler for NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result { - self.network_graph.write().unwrap().update_node_from_announcement(msg, Some(&self.secp_ctx)) + self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?; + Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && + msg.contents.excess_data.len() + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { - if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 { - return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); - } - - let utxo_value = match self.chain_monitor.get_chain_utxo(msg.contents.chain_hash, msg.contents.short_channel_id) { - Ok((script_pubkey, value)) => { - let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) - .push_slice(&msg.contents.bitcoin_key_1.serialize()) - .push_slice(&msg.contents.bitcoin_key_2.serialize()) - .push_opcode(opcodes::all::OP_PUSHNUM_2) - .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); - if script_pubkey != expected_script { - return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError}); - } - //TODO: Check if value is worth storing, use it to inform routing, and compare it - //to the new HTLC max field in channel_update - Some(value) - }, - Err(ChainError::NotSupported) => { - // Tentatively accept, potentially exposing us to DoS attacks - None - }, - Err(ChainError::NotWatched) => { - return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.contents.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError}); - }, - Err(ChainError::UnknownTx) => { - return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError}); - }, - }; - let result = self.network_graph.write().unwrap().update_channel_from_announcement(msg, utxo_value, Some(&self.secp_ctx)); + self.network_graph.write().unwrap().update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?; log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }); - result + Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) { match update { &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => { - let _ = self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx)); + let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1); + log_debug!(self.logger, "Updating channel with channel_update from a payment failure. 
Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" }); + let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx); }, &msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => { + log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id); self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent); }, &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => { + log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id); self.network_graph.write().unwrap().fail_node(node_id, is_permanent); }, } } fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result { - self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx)) + self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx)?; + Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } - fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option, Option)> { + fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = network_graph.get_channels().range(starting_point..); @@ -166,7 +217,7 @@ impl RoutingMessageHandler for N result } - fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { + fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = if let Some(pubkey) = starting_point { @@ -190,19 +241,214 @@ impl RoutingMessageHandler for N result } - fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { - //TODO: Determine whether to request a full sync based on the network map. - const FULL_SYNCS_TO_REQUEST: usize = 5; - if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST { - self.full_syncs_requested.fetch_add(1, Ordering::AcqRel); - true - } else { - false + /// Initiates a stateless sync of routing gossip information with a peer + /// using gossip_queries. The default strategy used by this implementation + /// is to sync the full block range with several peers. + /// + /// We should expect one or more reply_channel_range messages in response + /// to our query_channel_range. Each reply will enqueue a query_scid message + /// to request gossip messages for each channel. The sync is considered complete + /// when the final reply_scids_end message is received, though we are not + /// tracking this directly. + fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) { + + // We will only perform a sync with peers that support gossip_queries. 
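Both the failure-update path above and the direction/signature selection further down key off the two low bits of a channel_update's `flags` field. A minimal, self-contained sketch of that decoding, assuming the usual BOLT 7 layout (bit 0 selects which direction the update describes, bit 1 marks the channel disabled); the helper below is illustrative only, not part of this patch:

    // Illustrative helper: decode the two low bits of a channel_update `flags`
    // value the same way the handlers in this file do.
    fn parse_channel_update_flags(flags: u8) -> (bool, bool) {
        // Bit 0: which direction (and therefore which node's signature) the
        // update applies to -- 0 for node_1's direction, 1 for node_2's.
        let from_node_two = flags & 1 == 1;
        // Bit 1: the sender is advertising the channel as disabled.
        let disabled = flags & (1 << 1) == (1 << 1);
        (from_node_two, disabled)
    }

    fn main() {
        assert_eq!(parse_channel_update_flags(0b00), (false, false)); // node_1 direction, enabled
        assert_eq!(parse_channel_update_flags(0b11), (true, true));   // node_2 direction, disabled
    }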
+ if !init_msg.features.supports_gossip_queries() { + return (); + } + + // Check if we need to perform a full synchronization with this peer + if !self.should_request_full_sync(their_node_id) { + return (); + } + + let first_blocknum = 0; + let number_of_blocks = 0xffffffff; + log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks); + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(MessageSendEvent::SendChannelRangeQuery { + node_id: their_node_id.clone(), + msg: QueryChannelRange { + chain_hash: self.network_graph.read().unwrap().genesis_hash, + first_blocknum, + number_of_blocks, + }, + }); + } + + /// Statelessly processes a reply to a channel range query by immediately + /// sending an SCID query with SCIDs in the reply. To keep this handler + /// stateless, it does not validate the sequencing of replies for multi- + /// reply ranges. It does not validate whether the reply(ies) cover the + /// queried range. It also does not filter SCIDs to only those in the + /// original query range. We also do not validate that the chain_hash + /// matches the chain_hash of the NetworkGraph. Any chan_ann message that + /// does not match our chain_hash will be rejected when the announcement is + /// processed. + fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> { + log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),); + + log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len()); + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(MessageSendEvent::SendShortIdsQuery { + node_id: their_node_id.clone(), + msg: QueryShortChannelIds { + chain_hash: msg.chain_hash, + short_channel_ids: msg.short_channel_ids, + } + }); + + Ok(()) + } + + /// When an SCID query is initiated the remote peer will begin streaming + /// gossip messages. In the event of a failure, we may have received + /// some channel information. Before trying with another peer, the + /// caller should update its set of SCIDs that need to be queried. + fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { + log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information); + + // If the remote node does not have up-to-date information for the + // chain_hash they will set full_information=false. We can fail + // the result and try again with a different peer. + if !msg.full_information { + return Err(LightningError { + err: String::from("Received reply_short_channel_ids_end with no information"), + action: ErrorAction::IgnoreError + }); + } + + Ok(()) + } + + /// Processes a query from a peer by finding announced/public channels whose funding UTXOs + /// are in the specified block range. Due to message size limits, large range + /// queries may result in several reply messages. This implementation enqueues + /// all reply messages into pending events. Each message will allocate just under 65KiB. 
A full + /// sync of the public routing table with 128k channels will generated 16 messages and allocate ~1MB. + /// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts + /// memory constrained systems. + fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> { + log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks); + + let network_graph = self.network_graph.read().unwrap(); + + let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0); + + // We might receive valid queries with end_blocknum that would overflow SCID conversion. + // If so, we manually cap the ending block to avoid this overflow. + let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0); + + // Per spec, we must reply to a query. Send an empty message when things are invalid. + if msg.chain_hash != network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(MessageSendEvent::SendReplyChannelRange { + node_id: their_node_id.clone(), + msg: ReplyChannelRange { + chain_hash: msg.chain_hash.clone(), + first_blocknum: msg.first_blocknum, + number_of_blocks: msg.number_of_blocks, + sync_complete: true, + short_channel_ids: vec![], + } + }); + return Err(LightningError { + err: String::from("query_channel_range could not be processed"), + action: ErrorAction::IgnoreError, + }); + } + + // Creates channel batches. We are not checking if the channel is routable + // (has at least one update). A peer may still want to know the channel + // exists even if its not yet routable. + let mut batches: Vec> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)]; + for (_, ref chan) in network_graph.get_channels().range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) { + if let Some(chan_announcement) = &chan.announcement_message { + // Construct a new batch if last one is full + if batches.last().unwrap().len() == batches.last().unwrap().capacity() { + batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY)); + } + + let batch = batches.last_mut().unwrap(); + batch.push(chan_announcement.contents.short_channel_id); + } + } + drop(network_graph); + + let mut pending_events = self.pending_events.lock().unwrap(); + let batch_count = batches.len(); + let mut prev_batch_endblock = msg.first_blocknum; + for (batch_index, batch) in batches.into_iter().enumerate() { + // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum` + // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`. + // + // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each + // reply is >= the previous reply's `first_blocknum` and either exactly the previous + // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a + // significant diversion from the requirements set by the spec, and, in case of blocks + // with no channel opens (e.g. empty blocks), requires that we use the previous value + // and *not* derive the first_blocknum from the actual first block of the reply. + let first_blocknum = prev_batch_endblock; + + // Each message carries the number of blocks (from the `first_blocknum`) its contents + // fit in. 
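The range handling here relies on `scid_from_parts` and `block_from_scid` to move between block heights and short channel ids. A self-contained sketch of the packing those helpers are assumed to follow (3 bytes block height, 3 bytes transaction index within the block, 2 bytes output index); the functions below are stand-ins, not the crate's own:

    // Stand-in versions of the scid_utils helpers, assuming the standard
    // short_channel_id packing: block << 40 | tx_index << 16 | output_index.
    fn scid_from_parts(block: u64, tx_index: u64, vout: u64) -> Option<u64> {
        if block > 0x00ff_ffff || tx_index > 0x00ff_ffff || vout > 0xffff {
            return None; // each component must fit its fixed-width field
        }
        Some((block << 40) | (tx_index << 16) | vout)
    }

    fn block_from_scid(scid: u64) -> u32 {
        (scid >> 40) as u32
    }

    fn main() {
        let scid = scid_from_parts(654_321, 1_234, 1).unwrap();
        assert_eq!(block_from_scid(scid), 654_321);
        // MAX_SCID_BLOCK corresponds to the largest 3-byte block height.
        assert!(scid_from_parts(0x0100_0000, 0, 0).is_none());
    }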
Though there is no requirement that we use exactly the number of blocks its + // contents are from, except for the bogus requirements c-lightning enforces, above. + // + // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be + // >= the query's end block. Thus, for the last reply, we calculate the difference + // between the query's end block and the start of the reply. + // + // Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and + // first_blocknum will be either msg.first_blocknum or a higher block height. + let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 { + (true, msg.end_blocknum() - first_blocknum) + } + // Prior replies should use the number of blocks that fit into the reply. Overflow + // safe since first_blocknum is always <= last SCID's block. + else { + (false, block_from_scid(batch.last().unwrap()) - first_blocknum) + }; + + prev_batch_endblock = first_blocknum + number_of_blocks; + + pending_events.push(MessageSendEvent::SendReplyChannelRange { + node_id: their_node_id.clone(), + msg: ReplyChannelRange { + chain_hash: msg.chain_hash.clone(), + first_blocknum, + number_of_blocks, + sync_complete, + short_channel_ids: batch, + } + }); } + + Ok(()) + } + + fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { + // TODO + Err(LightningError { + err: String::from("Not implemented"), + action: ErrorAction::IgnoreError, + }) + } +} + +impl MessageSendEventsProvider for NetGraphMsgHandler +where + C::Target: chain::Access, + L::Target: Logger, +{ + fn get_and_clear_pending_msg_events(&self) -> Vec { + let mut ret = Vec::new(); + let mut pending_events = self.pending_events.lock().unwrap(); + core::mem::swap(&mut ret, &mut pending_events); + ret } } -#[derive(PartialEq, Debug)] +#[derive(Clone, Debug, PartialEq)] /// Details about one direction of a channel. Received /// within a channel update. pub struct DirectionalChannelInfo { @@ -223,7 +469,7 @@ pub struct DirectionalChannelInfo { /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. - pub last_update_message: Option, + pub last_update_message: Option, } impl fmt::Display for DirectionalChannelInfo { @@ -233,17 +479,17 @@ impl fmt::Display for DirectionalChannelInfo { } } -impl_writeable!(DirectionalChannelInfo, 0, { - last_update, - enabled, - cltv_expiry_delta, - htlc_minimum_msat, - htlc_maximum_msat, - fees, - last_update_message +impl_writeable_tlv_based!(DirectionalChannelInfo, { + (0, last_update, required), + (2, enabled, required), + (4, cltv_expiry_delta, required), + (6, htlc_minimum_msat, required), + (8, htlc_maximum_msat, required), + (10, fees, required), + (12, last_update_message, required), }); -#[derive(PartialEq)] +#[derive(Clone, Debug, PartialEq)] /// Details about a channel (both directions). /// Received within a channel announcement. pub struct ChannelInfo { @@ -263,7 +509,7 @@ pub struct ChannelInfo { /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. 
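The MessageSendEventsProvider implementation above queues replies in a Mutex-protected Vec and drains it with a swap so the caller takes ownership of the whole batch. A minimal sketch of that queue-and-drain pattern in isolation:

    use std::sync::Mutex;

    // Generic stand-in for the pending_events queue used above.
    struct EventQueue<E> {
        pending: Mutex<Vec<E>>,
    }

    impl<E> EventQueue<E> {
        fn new() -> Self { EventQueue { pending: Mutex::new(Vec::new()) } }

        fn push(&self, event: E) {
            self.pending.lock().unwrap().push(event);
        }

        // Swap rather than clone: the caller gets every queued event and the
        // shared queue is left empty for the next batch.
        fn get_and_clear(&self) -> Vec<E> {
            let mut ret = Vec::new();
            std::mem::swap(&mut ret, &mut *self.pending.lock().unwrap());
            ret
        }
    }

    fn main() {
        let queue = EventQueue::new();
        queue.push("send_reply_channel_range");
        queue.push("send_query_short_channel_ids");
        assert_eq!(queue.get_and_clear().len(), 2);
        assert!(queue.get_and_clear().is_empty());
    }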
- pub announcement_message: Option, + pub announcement_message: Option, } impl fmt::Display for ChannelInfo { @@ -274,14 +520,14 @@ impl fmt::Display for ChannelInfo { } } -impl_writeable!(ChannelInfo, 0, { - features, - node_one, - one_to_two, - node_two, - two_to_one, - capacity_sats, - announcement_message +impl_writeable_tlv_based!(ChannelInfo, { + (0, features, required), + (2, node_one, required), + (4, one_to_two, required), + (6, node_two, required), + (8, two_to_one, required), + (10, capacity_sats, required), + (12, announcement_message, required), }); @@ -295,26 +541,12 @@ pub struct RoutingFees { pub proportional_millionths: u32, } -impl Readable for RoutingFees{ - fn read(reader: &mut R) -> Result { - let base_msat: u32 = Readable::read(reader)?; - let proportional_millionths: u32 = Readable::read(reader)?; - Ok(RoutingFees { - base_msat, - proportional_millionths, - }) - } -} - -impl Writeable for RoutingFees { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - self.base_msat.write(writer)?; - self.proportional_millionths.write(writer)?; - Ok(()) - } -} +impl_writeable_tlv_based!(RoutingFees, { + (0, base_msat, required), + (2, proportional_millionths, required) +}); -#[derive(PartialEq, Debug)] +#[derive(Clone, Debug, PartialEq)] /// Information received in the latest node_announcement from this node. pub struct NodeAnnouncementInfo { /// Protocol features the node announced support for @@ -334,53 +566,19 @@ pub struct NodeAnnouncementInfo { /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. - pub announcement_message: Option + pub announcement_message: Option } -impl Writeable for NodeAnnouncementInfo { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - self.features.write(writer)?; - self.last_update.write(writer)?; - self.rgb.write(writer)?; - self.alias.write(writer)?; - (self.addresses.len() as u64).write(writer)?; - for ref addr in &self.addresses { - addr.write(writer)?; - } - self.announcement_message.write(writer)?; - Ok(()) - } -} - -impl Readable for NodeAnnouncementInfo { - fn read(reader: &mut R) -> Result { - let features = Readable::read(reader)?; - let last_update = Readable::read(reader)?; - let rgb = Readable::read(reader)?; - let alias = Readable::read(reader)?; - let addresses_count: u64 = Readable::read(reader)?; - let mut addresses = Vec::with_capacity(cmp::min(addresses_count, MAX_ALLOC_SIZE / 40) as usize); - for _ in 0..addresses_count { - match Readable::read(reader) { - Ok(Ok(addr)) => { addresses.push(addr); }, - Ok(Err(_)) => return Err(DecodeError::InvalidValue), - Err(DecodeError::ShortRead) => return Err(DecodeError::BadLengthDescriptor), - _ => unreachable!(), - } - } - let announcement_message = Readable::read(reader)?; - Ok(NodeAnnouncementInfo { - features, - last_update, - rgb, - alias, - addresses, - announcement_message - }) - } -} +impl_writeable_tlv_based!(NodeAnnouncementInfo, { + (0, features, required), + (2, last_update, required), + (4, rgb, required), + (6, alias, required), + (8, announcement_message, option), + (10, addresses, vec_type), +}); -#[derive(PartialEq)] +#[derive(Clone, Debug, PartialEq)] /// Details about a node in the network, known from the network announcement. 
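For context on the two RoutingFees components serialized above: when a payment is forwarded over a channel, the forwarding node's fee combines both fields. A sketch of the usual BOLT 7 formula, fee = base_msat + amount_msat * proportional_millionths / 1_000_000:

    fn forwarding_fee_msat(base_msat: u32, proportional_millionths: u32, amount_msat: u64) -> u64 {
        base_msat as u64 + amount_msat * proportional_millionths as u64 / 1_000_000
    }

    fn main() {
        // 1 sat base fee plus 30 ppm of a 200_000 msat payment = 1_006 msat.
        assert_eq!(forwarding_fee_msat(1_000, 30, 200_000), 1_006);
    }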
pub struct NodeInfo { /// All valid channels a node has announced @@ -403,46 +601,20 @@ impl fmt::Display for NodeInfo { } } -impl Writeable for NodeInfo { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { - (self.channels.len() as u64).write(writer)?; - for ref chan in self.channels.iter() { - chan.write(writer)?; - } - self.lowest_inbound_channel_fees.write(writer)?; - self.announcement_info.write(writer)?; - Ok(()) - } -} - -const MAX_ALLOC_SIZE: u64 = 64*1024; - -impl Readable for NodeInfo { - fn read(reader: &mut R) -> Result { - let channels_count: u64 = Readable::read(reader)?; - let mut channels = Vec::with_capacity(cmp::min(channels_count, MAX_ALLOC_SIZE / 8) as usize); - for _ in 0..channels_count { - channels.push(Readable::read(reader)?); - } - let lowest_inbound_channel_fees = Readable::read(reader)?; - let announcement_info = Readable::read(reader)?; - Ok(NodeInfo { - channels, - lowest_inbound_channel_fees, - announcement_info, - }) - } -} +impl_writeable_tlv_based!(NodeInfo, { + (0, lowest_inbound_channel_fees, option), + (2, announcement_info, option), + (4, channels, vec_type), +}); -/// Represents the network as nodes and channels between them -#[derive(PartialEq)] -pub struct NetworkGraph { - channels: BTreeMap, - nodes: BTreeMap, -} +const SERIALIZATION_VERSION: u8 = 1; +const MIN_SERIALIZATION_VERSION: u8 = 1; impl Writeable for NetworkGraph { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); + + self.genesis_hash.write(writer)?; (self.channels.len() as u64).write(writer)?; for (ref chan_id, ref chan_info) in self.channels.iter() { (*chan_id).write(writer)?; @@ -453,12 +625,17 @@ impl Writeable for NetworkGraph { node_id.write(writer)?; node_info.write(writer)?; } + + write_tlv_fields!(writer, {}); Ok(()) } } impl Readable for NetworkGraph { fn read(reader: &mut R) -> Result { + let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + + let genesis_hash: BlockHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; let mut channels = BTreeMap::new(); for _ in 0..channels_count { @@ -473,7 +650,10 @@ impl Readable for NetworkGraph { let node_info = Readable::read(reader)?; nodes.insert(node_id, node_info); } + read_tlv_fields!(reader, {}); + Ok(NetworkGraph { + genesis_hash, channels, nodes, }) @@ -482,13 +662,13 @@ impl Readable for NetworkGraph { impl fmt::Display for NetworkGraph { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "Network map\n[Channels]\n")?; + writeln!(f, "Network map\n[Channels]")?; for (key, val) in self.channels.iter() { - write!(f, " {}: {}\n", key, val)?; + writeln!(f, " {}: {}", key, val)?; } - write!(f, "[Nodes]\n")?; + writeln!(f, "[Nodes]")?; for (key, val) in self.nodes.iter() { - write!(f, " {}: {}\n", log_pubkey!(key), val)?; + writeln!(f, " {}: {}", log_pubkey!(key), val)?; } Ok(()) } @@ -496,13 +676,19 @@ impl fmt::Display for NetworkGraph { impl NetworkGraph { /// Returns all known valid channels' short ids along with announced channel info. + /// + /// (C-not exported) because we have no mapping for `BTreeMap`s pub fn get_channels<'a>(&'a self) -> &'a BTreeMap { &self.channels } /// Returns all known nodes' public keys along with announced node info. + /// + /// (C-not exported) because we have no mapping for `BTreeMap`s pub fn get_nodes<'a>(&'a self) -> &'a BTreeMap { &self.nodes } /// Get network addresses by node id. 
/// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. + /// + /// (C-not exported) as there is no practical way to track lifetimes of returned values. pub fn get_addresses<'a>(&'a self, pubkey: &PublicKey) -> Option<&'a Vec> { if let Some(node) = self.nodes.get(pubkey) { if let Some(node_info) = node.announcement_info.as_ref() { @@ -513,73 +699,145 @@ impl NetworkGraph { } /// Creates a new, empty, network graph. - pub fn new() -> NetworkGraph { + pub fn new(genesis_hash: BlockHash) -> NetworkGraph { Self { + genesis_hash, channels: BTreeMap::new(), nodes: BTreeMap::new(), } } - /// For an already known node (from channel announcements), update its stored properties from a given node announcement - /// Announcement signatures are checked here only if Secp256k1 object is provided. - fn update_node_from_announcement(&mut self, msg: &msgs::NodeAnnouncement, secp_ctx: Option<&Secp256k1>) -> Result { - if let Some(sig_verifier) = secp_ctx { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(sig_verifier, &msg_hash, &msg.signature, &msg.contents.node_id); - } + /// For an already known node (from channel announcements), update its stored properties from a + /// given node announcement. + /// + /// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's + /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// routing messages from a source using a protocol other than the lightning P2P protocol. + pub fn update_node_from_announcement(&mut self, msg: &msgs::NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id); + self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) + } + + /// For an already known node (from channel announcements), update its stored properties from a + /// given node announcement without verifying the associated signatures. Because we aren't + /// given the associated signatures here we cannot relay the node announcement to any of our + /// peers. 
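The verification performed by secp_verify_sig! above hashes the serialized unsigned contents with double-SHA256 and checks the announcing node's signature over that digest. A small sketch mirroring those calls (crate versions are assumed to match the ones this file already imports; the contents bytes are placeholders):

    use bitcoin::hashes::{sha256d::Hash as Sha256dHash, Hash};
    use bitcoin::secp256k1::key::{PublicKey, SecretKey};
    use bitcoin::secp256k1::{Message, Secp256k1};

    fn main() {
        let secp_ctx = Secp256k1::new();
        let seckey = SecretKey::from_slice(&[42; 32]).unwrap();
        let pubkey = PublicKey::from_secret_key(&secp_ctx, &seckey);

        // Stand-in for msg.contents.encode(): the serialized unsigned announcement.
        let unsigned_contents = vec![0u8; 64];
        let msghash = Message::from_slice(&Sha256dHash::hash(&unsigned_contents)[..]).unwrap();

        // The announcing node signs the digest; anyone can verify it against
        // the node_id carried in the announcement itself.
        let sig = secp_ctx.sign(&msghash, &seckey);
        assert!(secp_ctx.verify(&msghash, &sig, &pubkey).is_ok());
    }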
+ pub fn update_node_from_unsigned_announcement(&mut self, msg: &msgs::UnsignedNodeAnnouncement) -> Result<(), LightningError> { + self.update_node_from_announcement_intern(msg, None) + } - match self.nodes.get_mut(&msg.contents.node_id) { + fn update_node_from_announcement_intern(&mut self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> { + match self.nodes.get_mut(&msg.node_id) { None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}), Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { - if node_info.last_update >= msg.contents.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + if node_info.last_update >= msg.timestamp { + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); } } - let should_relay = msg.contents.excess_data.is_empty() && msg.contents.excess_address_data.is_empty(); + let should_relay = + msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && + msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY; node.announcement_info = Some(NodeAnnouncementInfo { - features: msg.contents.features.clone(), - last_update: msg.contents.timestamp, - rgb: msg.contents.rgb, - alias: msg.contents.alias, - addresses: msg.contents.addresses.clone(), - announcement_message: if should_relay { Some(msg.clone()) } else { None }, + features: msg.features.clone(), + last_update: msg.timestamp, + rgb: msg.rgb, + alias: msg.alias, + addresses: msg.addresses.clone(), + announcement_message: if should_relay { full_msg.cloned() } else { None }, }); - Ok(should_relay) + Ok(()) } } } - /// For a new or already known (from previous announcement) channel, store or update channel info. - /// Also store nodes (if not stored yet) the channel is between, and make node aware of this channel. - /// Checking utxo on-chain is useful if we receive an update for already known channel id, - /// which is probably result of a reorg. In that case, we update channel info only if the - /// utxo was checked, otherwise stick to the existing update, to prevent DoS risks. - /// Announcement signatures are checked here only if Secp256k1 object is provided. - fn update_channel_from_announcement(&mut self, msg: &msgs::ChannelAnnouncement, utxo_value: Option, secp_ctx: Option<&Secp256k1>) -> Result { - if let Some(sig_verifier) = secp_ctx { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - secp_verify_sig!(sig_verifier, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1); - secp_verify_sig!(sig_verifier, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2); - secp_verify_sig!(sig_verifier, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1); - secp_verify_sig!(sig_verifier, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2); + /// Store or update channel info from a channel announcement. + /// + /// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's + /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// routing messages from a source using a protocol other than the lightning P2P protocol. 
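When a `chain::Access` implementation is provided (see the note just below), the announced funding output is checked against the expected 2-of-2 multisig script for the two advertised bitcoin keys, wrapped in v0 P2WSH. A sketch of that script construction, mirroring the handler's own code; the key bytes in `main` are placeholders, not valid keys:

    use bitcoin::blockdata::opcodes;
    use bitcoin::blockdata::script::{Builder, Script};

    fn expected_channel_script(bitcoin_key_1: &[u8; 33], bitcoin_key_2: &[u8; 33]) -> Script {
        // OP_2 <key_1> <key_2> OP_2 OP_CHECKMULTISIG, wrapped in v0 P2WSH.
        Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
            .push_slice(&bitcoin_key_1[..])
            .push_slice(&bitcoin_key_2[..])
            .push_opcode(opcodes::all::OP_PUSHNUM_2)
            .push_opcode(opcodes::all::OP_CHECKMULTISIG)
            .into_script()
            .to_v0_p2wsh()
    }

    fn main() {
        // Placeholder 33-byte values standing in for serialized bitcoin keys.
        let script = expected_channel_script(&[2u8; 33], &[3u8; 33]);
        // A v0 P2WSH scriptPubKey is OP_0 followed by a 32-byte push (34 bytes).
        assert_eq!(script.len(), 34);
    }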
+ /// + /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify + /// the corresponding UTXO exists on chain and is correctly-formatted. + pub fn update_channel_from_announcement + (&mut self, msg: &msgs::ChannelAnnouncement, chain_access: &Option, secp_ctx: &Secp256k1) + -> Result<(), LightningError> + where C::Target: chain::Access { + let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1); + secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2); + self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), chain_access) + } + + /// Store or update channel info from a channel announcement without verifying the associated + /// signatures. Because we aren't given the associated signatures here we cannot relay the + /// channel announcement to any of our peers. + /// + /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify + /// the corresponding UTXO exists on chain and is correctly-formatted. + pub fn update_channel_from_unsigned_announcement + (&mut self, msg: &msgs::UnsignedChannelAnnouncement, chain_access: &Option) + -> Result<(), LightningError> + where C::Target: chain::Access { + self.update_channel_from_unsigned_announcement_intern(msg, None, chain_access) + } + + fn update_channel_from_unsigned_announcement_intern + (&mut self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option) + -> Result<(), LightningError> + where C::Target: chain::Access { + if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 { + return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } - let should_relay = msg.contents.excess_data.is_empty(); + let utxo_value = match &chain_access { + &None => { + // Tentatively accept, potentially exposing us to DoS attacks + None + }, + &Some(ref chain_access) => { + match chain_access.get_utxo(&msg.chain_hash, msg.short_channel_id) { + Ok(TxOut { value, script_pubkey }) => { + let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) + .push_slice(&msg.bitcoin_key_1.serialize()) + .push_slice(&msg.bitcoin_key_2.serialize()) + .push_opcode(opcodes::all::OP_PUSHNUM_2) + .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); + if script_pubkey != expected_script { + return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError}); + } + //TODO: Check if value is worth storing, use it to inform routing, and compare it + //to the new HTLC max field in channel_update + Some(value) + }, + Err(chain::AccessError::UnknownChain) => { + return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError}); + }, + Err(chain::AccessError::UnknownTx) => { + return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError}); + }, + } + }, + }; let chan_info = ChannelInfo { - features: 
msg.contents.features.clone(), - node_one: msg.contents.node_id_1.clone(), + features: msg.features.clone(), + node_one: msg.node_id_1.clone(), one_to_two: None, - node_two: msg.contents.node_id_2.clone(), + node_two: msg.node_id_2.clone(), two_to_one: None, capacity_sats: utxo_value, - announcement_message: if should_relay { Some(msg.clone()) } else { None }, + announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY + { full_msg.cloned() } else { None }, }; - match self.channels.entry(msg.contents.short_channel_id) { + match self.channels.entry(msg.short_channel_id) { BtreeEntry::Occupied(mut entry) => { //TODO: because asking the blockchain if short_channel_id is valid is only optional //in the blockchain API, we need to handle it smartly here, though it's unclear @@ -593,10 +851,10 @@ impl NetworkGraph { // b) we don't track UTXOs of channels we know about and remove them if they // get reorg'd out. // c) it's unclear how to do so without exposing ourselves to massive DoS risk. - Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.contents.short_channel_id); + Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id); *entry.get_mut() = chan_info; } else { - return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError}) + return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}) } }, BtreeEntry::Vacant(entry) => { @@ -608,11 +866,11 @@ impl NetworkGraph { ( $node_id: expr ) => { match self.nodes.entry($node_id) { BtreeEntry::Occupied(node_entry) => { - node_entry.into_mut().channels.push(msg.contents.short_channel_id); + node_entry.into_mut().channels.push(msg.short_channel_id); }, BtreeEntry::Vacant(node_entry) => { node_entry.insert(NodeInfo { - channels: vec!(msg.contents.short_channel_id), + channels: vec!(msg.short_channel_id), lowest_inbound_channel_fees: None, announcement_info: None, }); @@ -621,10 +879,10 @@ impl NetworkGraph { }; } - add_channel_to_node!(msg.contents.node_id_1); - add_channel_to_node!(msg.contents.node_id_2); + add_channel_to_node!(msg.node_id_1); + add_channel_to_node!(msg.node_id_2); - Ok(should_relay) + Ok(()) } /// Close a channel if a corresponding HTLC fail was sent. @@ -656,42 +914,67 @@ impl NetworkGraph { } } - /// For an already known (from announcement) channel, update info about one of the directions of a channel. - /// Announcement signatures are checked here only if Secp256k1 object is provided. - fn update_channel(&mut self, msg: &msgs::ChannelUpdate, secp_ctx: Option<&Secp256k1>) -> Result { + /// For an already known (from announcement) channel, update info about one of the directions + /// of the channel. + /// + /// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's + /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept + /// routing messages from a source using a protocol other than the lightning P2P protocol. + pub fn update_channel(&mut self, msg: &msgs::ChannelUpdate, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + self.update_channel_intern(&msg.contents, Some(&msg), Some((&msg.signature, secp_ctx))) + } + + /// For an already known (from announcement) channel, update info about one of the directions + /// of the channel without verifying the associated signatures. 
Because we aren't given the + /// associated signatures here we cannot relay the channel update to any of our peers. + pub fn update_channel_unsigned(&mut self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { + self.update_channel_intern(msg, None, None::<(&secp256k1::Signature, &Secp256k1)>) + } + + fn update_channel_intern(&mut self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::Signature, &Secp256k1)>) -> Result<(), LightningError> { let dest_node_id; - let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1); + let chan_enabled = msg.flags & (1 << 1) != (1 << 1); let chan_was_enabled; - match self.channels.get_mut(&msg.contents.short_channel_id) { + match self.channels.get_mut(&msg.short_channel_id) { None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}), Some(channel) => { + if let OptionalField::Present(htlc_maximum_msat) = msg.htlc_maximum_msat { + if htlc_maximum_msat > MAX_VALUE_MSAT { + return Err(LightningError{err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(), action: ErrorAction::IgnoreError}); + } + + if let Some(capacity_sats) = channel.capacity_sats { + // It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None). + // Don't query UTXO set here to reduce DoS risks. + if capacity_sats > MAX_VALUE_MSAT / 1000 || htlc_maximum_msat > capacity_sats * 1000 { + return Err(LightningError{err: "htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(), action: ErrorAction::IgnoreError}); + } + } + } macro_rules! maybe_update_channel_info { ( $target: expr, $src_node: expr) => { if let Some(existing_chan_info) = $target.as_ref() { - if existing_chan_info.last_update >= msg.contents.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + if existing_chan_info.last_update >= msg.timestamp { + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); } chan_was_enabled = existing_chan_info.enabled; } else { chan_was_enabled = false; } - let last_update_message = if msg.contents.excess_data.is_empty() { - Some(msg.clone()) - } else { - None - }; + let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY + { full_msg.cloned() } else { None }; let updated_channel_dir_info = DirectionalChannelInfo { enabled: chan_enabled, - last_update: msg.contents.timestamp, - cltv_expiry_delta: msg.contents.cltv_expiry_delta, - htlc_minimum_msat: msg.contents.htlc_minimum_msat, - htlc_maximum_msat: if let OptionalField::Present(max_value) = msg.contents.htlc_maximum_msat { Some(max_value) } else { None }, + last_update: msg.timestamp, + cltv_expiry_delta: msg.cltv_expiry_delta, + htlc_minimum_msat: msg.htlc_minimum_msat, + htlc_maximum_msat: if let OptionalField::Present(max_value) = msg.htlc_maximum_msat { Some(max_value) } else { None }, fees: RoutingFees { - base_msat: msg.contents.fee_base_msat, - proportional_millionths: msg.contents.fee_proportional_millionths, + base_msat: msg.fee_base_msat, + proportional_millionths: msg.fee_proportional_millionths, }, last_update_message }; @@ -699,17 +982,17 @@ impl NetworkGraph { } } - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); - if msg.contents.flags & 1 == 1 { + let msg_hash = 
hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); + if msg.flags & 1 == 1 { dest_node_id = channel.node_one.clone(); - if let Some(sig_verifier) = secp_ctx { - secp_verify_sig!(sig_verifier, &msg_hash, &msg.signature, &channel.node_two); + if let Some((sig, ctx)) = sig_info { + secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_two); } maybe_update_channel_info!(channel.two_to_one, channel.node_two); } else { dest_node_id = channel.node_two.clone(); - if let Some(sig_verifier) = secp_ctx { - secp_verify_sig!(sig_verifier, &msg_hash, &msg.signature, &channel.node_one); + if let Some((sig, ctx)) = sig_info { + secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_one); } maybe_update_channel_info!(channel.one_to_two, channel.node_one); } @@ -718,8 +1001,8 @@ impl NetworkGraph { if chan_enabled { let node = self.nodes.get_mut(&dest_node_id).unwrap(); - let mut base_msat = msg.contents.fee_base_msat; - let mut proportional_millionths = msg.contents.fee_proportional_millionths; + let mut base_msat = msg.fee_base_msat; + let mut proportional_millionths = msg.fee_proportional_millionths; if let Some(fees) = node.lowest_inbound_channel_fees { base_msat = cmp::min(base_msat, fees.base_msat); proportional_millionths = cmp::min(proportional_millionths, fees.proportional_millionths); @@ -753,7 +1036,7 @@ impl NetworkGraph { node.lowest_inbound_channel_fees = lowest_inbound_channel_fees; } - Ok(msg.contents.excess_data.is_empty()) + Ok(()) } fn remove_channel_in_nodes(nodes: &mut BTreeMap, chan: &ChannelInfo, short_channel_id: u64) { @@ -779,35 +1062,39 @@ impl NetworkGraph { #[cfg(test)] mod tests { - use chain::chaininterface; - use ln::features::{ChannelFeatures, NodeFeatures}; - use routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; - use ln::msgs::{OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, - UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate}; + use chain; + use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; + use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, MAX_EXCESS_BYTES_FOR_RELAY}; + use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, + UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate, + ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use util::test_utils; use util::logger::Logger; use util::ser::{Readable, Writeable}; + use util::events::{MessageSendEvent, MessageSendEventsProvider}; + use util::scid_utils::scid_from_parts; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::network::constants::Network; use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::script::Builder; + use bitcoin::blockdata::transaction::TxOut; use bitcoin::blockdata::opcodes; - use bitcoin::util::hash::BitcoinHash; use hex; use bitcoin::secp256k1::key::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; - use std::sync::Arc; + use prelude::*; + use sync::Arc; - fn create_net_graph_msg_handler() -> (Secp256k1, NetGraphMsgHandler, Arc>) { + fn create_net_graph_msg_handler() -> (Secp256k1, NetGraphMsgHandler, Arc>) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); - let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet)); - let net_graph_msg_handler = 
NetGraphMsgHandler::new(chain_monitor, Arc::clone(&logger)); + let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); + let net_graph_msg_handler = NetGraphMsgHandler::new(genesis_hash, None, Arc::clone(&logger)); (secp_ctx, net_graph_msg_handler) } @@ -862,7 +1149,7 @@ mod tests { // Announce a channel to add a corresponding node. let unsigned_announcement = UnsignedChannelAnnouncement { features: ChannelFeatures::known(), - chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(), + chain_hash: genesis_block(Network::Testnet).header.block_hash(), short_channel_id: 0, node_id_1, node_id_2, @@ -901,7 +1188,7 @@ mod tests { }; unsigned_announcement.timestamp += 1000; - unsigned_announcement.excess_data.push(1); + unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let announcement_with_data = NodeAnnouncement { signature: secp_ctx.sign(&msghash, node_1_privkey), @@ -932,9 +1219,6 @@ mod tests { fn handling_channel_announcements() { let secp_ctx = Secp256k1::new(); let logger: Arc = Arc::new(test_utils::TestLogger::new()); - let chain_monitor = Arc::new(test_utils::TestChainWatcher::new()); - let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor.clone(), Arc::clone(&logger)); - let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); @@ -952,7 +1236,7 @@ mod tests { let mut unsigned_announcement = UnsignedChannelAnnouncement { features: ChannelFeatures::known(), - chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(), + chain_hash: genesis_block(Network::Testnet).header.block_hash(), short_channel_id: 0, node_id_1, node_id_2, @@ -971,8 +1255,7 @@ mod tests { }; // Test if the UTXO lookups were not supported - *chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::NotSupported); - + let mut net_graph_msg_handler = NetGraphMsgHandler::new(genesis_block(Network::Testnet).header.block_hash(), None, Arc::clone(&logger)); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() @@ -986,7 +1269,6 @@ mod tests { } } - // If we receive announcement for the same channel (with UTXO lookups disabled), // drop new one on the floor, since we can't see any changes. match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { @@ -994,9 +1276,10 @@ mod tests { Err(e) => assert_eq!(e.err, "Already have knowledge of channel") }; - // Test if an associated transaction were not on-chain (or not confirmed). - *chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::UnknownTx); + let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); + *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx); + net_graph_msg_handler = NetGraphMsgHandler::new(chain_source.clone().genesis_hash, Some(chain_source.clone()), Arc::clone(&logger)); unsigned_announcement.short_channel_id += 1; msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); @@ -1013,10 +1296,9 @@ mod tests { Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry") }; - // Now test if the transaction is found in the UTXO set and the script is correct. 
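The tests above swap in a chain source whose `utxo_ret` can be set per case. A sketch of what such a minimal `chain::Access` implementation can look like; the struct is hypothetical and the trait paths and shape are assumed from the calls made in this file:

    use bitcoin::blockdata::script::Script;
    use bitcoin::blockdata::transaction::TxOut;
    use bitcoin::hash_types::BlockHash;
    use bitcoin::hashes::Hash;
    use lightning::chain::{Access, AccessError};
    use std::sync::Mutex;

    // Returns whatever result was last stored, for any chain/SCID queried.
    struct CannedChainSource {
        utxo_ret: Mutex<Result<TxOut, AccessError>>,
    }

    impl Access for CannedChainSource {
        fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> Result<TxOut, AccessError> {
            match &*self.utxo_ret.lock().unwrap() {
                Ok(txo) => Ok(txo.clone()),
                Err(AccessError::UnknownChain) => Err(AccessError::UnknownChain),
                Err(AccessError::UnknownTx) => Err(AccessError::UnknownTx),
            }
        }
    }

    fn main() {
        let source = CannedChainSource {
            utxo_ret: Mutex::new(Ok(TxOut { value: 0, script_pubkey: Script::new() })),
        };
        assert!(source.get_utxo(&BlockHash::hash(&[]), 42).is_ok());
    }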
unsigned_announcement.short_channel_id += 1; - *chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script.clone(), 0)); + *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script.clone() }); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = ChannelAnnouncement { @@ -1041,14 +1323,14 @@ mod tests { // If we receive announcement for the same channel (but TX is not confirmed), // drop new one on the floor, since we can't see any changes. - *chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::UnknownTx); + *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry") }; // But if it is confirmed, replace the channel - *chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script, 0)); + *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script }); unsigned_announcement.features = ChannelFeatures::empty(); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = ChannelAnnouncement { @@ -1074,7 +1356,7 @@ mod tests { // Don't relay valid channels with excess data unsigned_announcement.short_channel_id += 1; - unsigned_announcement.excess_data.push(1); + unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), @@ -1104,8 +1386,8 @@ mod tests { unsigned_announcement.node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let channel_to_itself_announcement = ChannelAnnouncement { - node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), - node_signature_2: secp_ctx.sign(&msghash, node_1_privkey), + node_signature_1: secp_ctx.sign(&msghash, node_2_privkey), + node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), @@ -1118,7 +1400,11 @@ mod tests { #[test] fn handling_channel_update() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let secp_ctx = Secp256k1::new(); + let logger: Arc = Arc::new(test_utils::TestLogger::new()); + let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); + let net_graph_msg_handler = NetGraphMsgHandler::new(genesis_block(Network::Testnet).header.block_hash(), Some(chain_source.clone()), Arc::clone(&logger)); + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); @@ -1128,9 +1414,17 @@ mod tests { let zero_hash = Sha256dHash::hash(&[0; 32]); let short_channel_id = 0; - let chain_hash = genesis_block(Network::Testnet).header.bitcoin_hash(); + let chain_hash = genesis_block(Network::Testnet).header.block_hash(); + let amount_sats = 1000_000; + { // Announce a channel we will update + let good_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) + .push_slice(&PublicKey::from_secret_key(&secp_ctx, 
node_1_btckey).serialize()) + .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize()) + .push_opcode(opcodes::all::OP_PUSHNUM_2) + .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); + *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() }); let unsigned_announcement = UnsignedChannelAnnouncement { features: ChannelFeatures::empty(), chain_hash, @@ -1192,7 +1486,7 @@ mod tests { } unsigned_channel_update.timestamp += 100; - unsigned_channel_update.excess_data.push(1); + unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0); let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); let valid_channel_update = ChannelUpdate { signature: secp_ctx.sign(&msghash, node_1_privkey), @@ -1218,6 +1512,31 @@ mod tests { }; unsigned_channel_update.short_channel_id = short_channel_id; + unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(MAX_VALUE_MSAT + 1); + let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); + let valid_channel_update = ChannelUpdate { + signature: secp_ctx.sign(&msghash, node_1_privkey), + contents: unsigned_channel_update.clone() + }; + + match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats") + }; + unsigned_channel_update.htlc_maximum_msat = OptionalField::Absent; + + unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(amount_sats * 1000 + 1); + let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); + let valid_channel_update = ChannelUpdate { + signature: secp_ctx.sign(&msghash, node_1_privkey), + contents: unsigned_channel_update.clone() + }; + + match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { + Ok(_) => panic!(), + Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than channel capacity or capacity is bogus") + }; + unsigned_channel_update.htlc_maximum_msat = OptionalField::Absent; // Even though previous update was not relayed further, we still accepted it, // so we now won't accept update before the previous one. @@ -1258,7 +1577,7 @@ mod tests { let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); let short_channel_id = 0; - let chain_hash = genesis_block(Network::Testnet).header.bitcoin_hash(); + let chain_hash = genesis_block(Network::Testnet).header.block_hash(); { // There is no nodes in the table at the beginning. @@ -1373,7 +1692,7 @@ mod tests { let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); let short_channel_id = 1; - let chain_hash = genesis_block(Network::Testnet).header.bitcoin_hash(); + let chain_hash = genesis_block(Network::Testnet).header.block_hash(); // Channels were not announced yet. 
 		let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(0, 1);
@@ -1467,7 +1786,7 @@ mod tests {
 			htlc_maximum_msat: OptionalField::Absent,
 			fee_base_msat: 10000,
 			fee_proportional_millionths: 20,
-			excess_data: [1; 3].to_vec()
+			excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec()
 		};
 		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
 		let valid_channel_update = ChannelUpdate {
@@ -1507,7 +1826,7 @@ mod tests {
 		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
 		let short_channel_id = 1;
-		let chain_hash = genesis_block(Network::Testnet).header.bitcoin_hash();
+		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
 
 		// No nodes yet.
 		let next_announcements = net_graph_msg_handler.get_next_node_announcements(None, 10);
@@ -1596,7 +1915,7 @@ mod tests {
 			alias: [0; 32],
 			addresses: Vec::new(),
 			excess_address_data: Vec::new(),
-			excess_data: [1; 3].to_vec(),
+			excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(),
 		};
 		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
 		let valid_announcement = NodeAnnouncement {
@@ -1627,7 +1946,7 @@ mod tests {
 		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
 		let unsigned_announcement = UnsignedChannelAnnouncement {
 			features: ChannelFeatures::known(),
-			chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
 			short_channel_id: 0,
 			node_id_1,
 			node_id_2,
@@ -1679,4 +1998,490 @@ mod tests {
 		network.write(&mut w).unwrap();
 		assert!(<NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network);
 	}
+
+	#[test]
+	fn calling_sync_routing_table() {
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+		let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
+		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
+
+		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+		let first_blocknum = 0;
+		let number_of_blocks = 0xffff_ffff;
+
+		// It should ignore if gossip_queries feature is not enabled
+		{
+			let init_msg = Init { features: InitFeatures::known().clear_gossip_queries() };
+			net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg);
+			let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 0);
+		}
+
+		// It should send a query_channel_message with the correct information
+		{
+			let init_msg = Init { features: InitFeatures::known() };
+			net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg);
+			let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 1);
+			match &events[0] {
+				MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => {
+					assert_eq!(node_id, &node_id_1);
+					assert_eq!(msg.chain_hash, chain_hash);
+					assert_eq!(msg.first_blocknum, first_blocknum);
+					assert_eq!(msg.number_of_blocks, number_of_blocks);
+				},
+				_ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
+			};
+		}
+
+		// It should not enqueue a query when should_request_full_sync return false.
+		// The initial implementation allows syncing with the first 5 peers after
+		// which should_request_full_sync will return false
+		{
+			let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+			let init_msg = Init { features: InitFeatures::known() };
+			for n in 1..7 {
+				let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap();
+				let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
+				net_graph_msg_handler.sync_routing_table(&node_id, &init_msg);
+				let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+				if n <= 5 {
+					assert_eq!(events.len(), 1);
+				} else {
+					assert_eq!(events.len(), 0);
+				}
+
+			}
+		}
+	}
+
+	#[test]
+	fn handling_reply_channel_range() {
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+		let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
+		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
+
+		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+
+		// Test receipt of a single reply that should enqueue an SCID query
+		// matching the SCIDs in the reply
+		{
+			let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
+				chain_hash,
+				sync_complete: true,
+				first_blocknum: 0,
+				number_of_blocks: 2000,
+				short_channel_ids: vec![
+					0x0003e0_000000_0000, // 992x0x0
+					0x0003e8_000000_0000, // 1000x0x0
+					0x0003e9_000000_0000, // 1001x0x0
+					0x0003f0_000000_0000, // 1008x0x0
+					0x00044c_000000_0000, // 1100x0x0
+					0x0006e0_000000_0000, // 1760x0x0
+				],
+			});
+			assert!(result.is_ok());
+
+			// We expect to emit a query_short_channel_ids message with the received scids
+			let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 1);
+			match &events[0] {
+				MessageSendEvent::SendShortIdsQuery { node_id, msg } => {
+					assert_eq!(node_id, &node_id_1);
+					assert_eq!(msg.chain_hash, chain_hash);
+					assert_eq!(msg.short_channel_ids, vec![
+						0x0003e0_000000_0000, // 992x0x0
+						0x0003e8_000000_0000, // 1000x0x0
+						0x0003e9_000000_0000, // 1001x0x0
+						0x0003f0_000000_0000, // 1008x0x0
+						0x00044c_000000_0000, // 1100x0x0
+						0x0006e0_000000_0000, // 1760x0x0
+					]);
+				},
+				_ => panic!("expected MessageSendEvent::SendShortIdsQuery"),
+			}
+		}
+	}
+
+	#[test]
+	fn handling_reply_short_channel_ids() {
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
+
+		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+
+		// Test receipt of a successful reply
+		{
+			let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
+				chain_hash,
+				full_information: true,
+			});
+			assert!(result.is_ok());
+		}
+
+		// Test receipt of a reply that indicates the peer does not maintain up-to-date information
+		// for the chain_hash requested in the query.
+		{
+			let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
+				chain_hash,
+				full_information: false,
+			});
+			assert!(result.is_err());
+			assert_eq!(result.err().unwrap().err, "Received reply_short_channel_ids_end with no information");
+		}
+	}
+
+	#[test]
+	fn handling_query_channel_range() {
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+
+		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
+		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
+		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
+		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
+		let bitcoin_key_1 = PublicKey::from_secret_key(&secp_ctx, node_1_btckey);
+		let bitcoin_key_2 = PublicKey::from_secret_key(&secp_ctx, node_2_btckey);
+
+		let mut scids: Vec<u64> = vec![
+			scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
+			scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
+		];
+
+		// used for testing multipart reply across blocks
+		for block in 100000..=108001 {
+			scids.push(scid_from_parts(block, 0, 0).unwrap());
+		}
+
+		// used for testing resumption on same block
+		scids.push(scid_from_parts(108001, 1, 0).unwrap());
+
+		for scid in scids {
+			let unsigned_announcement = UnsignedChannelAnnouncement {
+				features: ChannelFeatures::known(),
+				chain_hash: chain_hash.clone(),
+				short_channel_id: scid,
+				node_id_1,
+				node_id_2,
+				bitcoin_key_1,
+				bitcoin_key_2,
+				excess_data: Vec::new(),
+			};
+
+			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
+			let valid_announcement = ChannelAnnouncement {
+				node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
+				node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
+				bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
+				bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
+				contents: unsigned_announcement.clone(),
+			};
+			match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
+				Ok(_) => (),
+				_ => panic!()
+			};
+		}
+
+		// Error when number_of_blocks=0
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0,
+				number_of_blocks: 0,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0,
+				number_of_blocks: 0,
+				sync_complete: true,
+				short_channel_ids: vec![]
+			}]
+		);
+
+		// Error when wrong chain
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
+				first_blocknum: 0,
+				number_of_blocks: 0xffff_ffff,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
+				first_blocknum: 0,
+				number_of_blocks: 0xffff_ffff,
+				sync_complete: true,
+				short_channel_ids: vec![],
+			}]
+		);
+
+		// Error when first_blocknum > 0xffffff
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0x01000000,
+				number_of_blocks: 0xffff_ffff,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0x01000000,
+				number_of_blocks: 0xffff_ffff,
+				sync_complete: true,
+				short_channel_ids: vec![]
+			}]
+		);
+
+		// Empty reply when max valid SCID block num
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0xffffff,
+				number_of_blocks: 1,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0xffffff,
+					number_of_blocks: 1,
+					sync_complete: true,
+					short_channel_ids: vec![]
+				},
+			]
+		);
+
+		// No results in valid query range
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 1000,
+				number_of_blocks: 1000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 1000,
+					number_of_blocks: 1000,
+					sync_complete: true,
+					short_channel_ids: vec![],
+				}
+			]
+		);
+
+		// Overflow first_blocknum + number_of_blocks
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0xfe0000,
+				number_of_blocks: 0xffffffff,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0xfe0000,
+					number_of_blocks: 0xffffffff - 0xfe0000,
+					sync_complete: true,
+					short_channel_ids: vec![
+						0xfffffe_ffffff_ffff, // max
+					]
+				}
+			]
+		);
+
+		// Single block exactly full
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100000,
+				number_of_blocks: 8000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 100000,
+					number_of_blocks: 8000,
+					sync_complete: true,
+					short_channel_ids: (100000..=107999)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+			]
+		);
+
+		// Multiple split on new block
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100000,
+				number_of_blocks: 8001,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 100000,
+					number_of_blocks: 7999,
+					sync_complete: false,
+					short_channel_ids: (100000..=107999)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 107999,
+					number_of_blocks: 2,
+					sync_complete: true,
+					short_channel_ids: vec![
+						scid_from_parts(108000, 0, 0).unwrap(),
+					],
+				}
+			]
+		);
+
+		// Multiple split on same block
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100002,
+				number_of_blocks: 8000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 100002,
+					number_of_blocks: 7999,
+					sync_complete: false,
+					short_channel_ids: (100002..=108001)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 108001,
+					number_of_blocks: 1,
+					sync_complete: true,
+					short_channel_ids: vec![
+						scid_from_parts(108001, 1, 0).unwrap(),
+					],
+				}
+			]
+		);
+	}
+
+	fn do_handling_query_channel_range(
+		net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
+		test_node_id: &PublicKey,
+		msg: QueryChannelRange,
+		expected_ok: bool,
+		expected_replies: Vec<ReplyChannelRange>
+	) {
+		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+		let query_end_blocknum = msg.end_blocknum();
+		let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
+
+		if expected_ok {
+			assert!(result.is_ok());
+		} else {
+			assert!(result.is_err());
+		}
+
+		let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), expected_replies.len());
+
+		for i in 0..events.len() {
+			let expected_reply = &expected_replies[i];
+			match &events[i] {
+				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
+					assert_eq!(node_id, test_node_id);
+					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
+					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
+					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
+					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
+					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+					// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+					assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+					assert!(msg.first_blocknum >= max_firstblocknum);
+					max_firstblocknum = msg.first_blocknum;
+					c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+					// Check that the last block count is >= the query's end_blocknum
+					if i == events.len() - 1 {
+						assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+					}
+				},
+				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
+			}
+		}
+	}
+
+	#[test]
+	fn handling_query_short_channel_ids() {
+		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
+		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
+
+		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+
+		let result = net_graph_msg_handler.handle_query_short_channel_ids(&node_id, QueryShortChannelIds {
+			chain_hash,
+			short_channel_ids: vec![0x0003e8_000000_0000],
+		});
+		assert!(result.is_err());
+	}
+}
+
+#[cfg(all(test, feature = "unstable"))]
+mod benches {
+	use super::*;
+
+	use test::Bencher;
+	use std::io::Read;
+
+	#[bench]
+	fn read_network_graph(bench: &mut Bencher) {
+		let mut d = ::routing::router::test_utils::get_route_file().unwrap();
+		let mut v = Vec::new();
+		d.read_to_end(&mut v).unwrap();
+		bench.iter(|| {
+			let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v)).unwrap();
+		});
+	}
+
+	#[bench]
+	fn write_network_graph(bench: &mut Bencher) {
+		let mut d = ::routing::router::test_utils::get_route_file().unwrap();
+		let net_graph = NetworkGraph::read(&mut d).unwrap();
+		bench.iter(|| {
+			let _ = net_graph.encode();
+		});
+	}
 }
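
The short_channel_id constants exercised by the gossip-query tests above pack a block height, a transaction index and an output index into a single u64 (24 + 24 + 16 bits), which is why 0x0003e8_000000_0000 is annotated as 1000x0x0. Below is a minimal, self-contained sketch of that packing; the helper names are illustrative only and are not the crate's scid_utils functions.

// Illustrative sketch only (not the crate's scid_utils API): BOLT 7 lays out a
// short_channel_id as block height (24 bits) | tx index (24 bits) | output index (16 bits).
fn pack_scid(block: u64, tx_index: u64, vout: u64) -> u64 {
	assert!(block <= 0xffffff && tx_index <= 0xffffff && vout <= 0xffff);
	(block << 40) | (tx_index << 16) | vout
}

fn unpack_scid(scid: u64) -> (u64, u64, u64) {
	(scid >> 40, (scid >> 16) & 0xffffff, scid & 0xffff)
}

#[test]
fn scid_packing_matches_the_test_vectors() {
	// 1000x0x0 and 1760x0x0 from the reply_channel_range test above.
	assert_eq!(pack_scid(1000, 0, 0), 0x0003e8_000000_0000);
	assert_eq!(unpack_scid(0x0006e0_000000_0000), (1760, 0, 0));
}

Under this layout, the first_blocknum/number_of_blocks window in a query_channel_range message selects SCIDs purely by their upper 24 bits, which is what the block-boundary splitting cases in handling_query_channel_range rely on.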