X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=10c0ba57b58b05e2a6875795773e4a19188c6cb1;hb=refs%2Fheads%2F2021-11-fix-update-announcements;hp=7f0866030c2ff1a4de3c37866235fbdb92b0fe66;hpb=74f10076b25b7fcbd9656361234d69976e86cd1c;p=rust-lightning diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 7f086603..10c0ba57 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -9,6 +9,7 @@ //! The top-level network map tracking logic lives here. +use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; use bitcoin::secp256k1::key::PublicKey; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1; @@ -28,16 +29,17 @@ use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalFie use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; -use util::logger::Logger; -use util::events::{MessageSendEvent, MessageSendEventsProvider}; +use util::logger::{Logger, Level}; +use util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider}; use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; +use io; use prelude::*; use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry}; use core::{cmp, fmt}; -use std::sync::{RwLock, RwLockReadGuard}; +use sync::{RwLock, RwLockReadGuard}; use core::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Mutex; +use sync::Mutex; use core::ops::Deref; use bitcoin::hashes::hex::ToHex; @@ -49,57 +51,183 @@ const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024; /// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations. const MAX_SCIDS_PER_REPLY: usize = 8000; +/// Represents the compressed public key of a node +#[derive(Clone, Copy)] +pub struct NodeId([u8; PUBLIC_KEY_SIZE]); + +impl NodeId { + /// Create a new NodeId from a public key + pub fn from_pubkey(pubkey: &PublicKey) -> Self { + NodeId(pubkey.serialize()) + } + + /// Get the public key slice from this NodeId + pub fn as_slice(&self) -> &[u8] { + &self.0 + } +} + +impl fmt::Debug for NodeId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "NodeId({})", log_bytes!(self.0)) + } +} + +impl core::hash::Hash for NodeId { + fn hash(&self, hasher: &mut H) { + self.0.hash(hasher); + } +} + +impl Eq for NodeId {} + +impl PartialEq for NodeId { + fn eq(&self, other: &Self) -> bool { + self.0[..] == other.0[..] + } +} + +impl cmp::PartialOrd for NodeId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for NodeId { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0[..].cmp(&other.0[..]) + } +} + +impl Writeable for NodeId { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + writer.write_all(&self.0)?; + Ok(()) + } +} + +impl Readable for NodeId { + fn read(reader: &mut R) -> Result { + let mut buf = [0; PUBLIC_KEY_SIZE]; + reader.read_exact(&mut buf)?; + Ok(Self(buf)) + } +} + /// Represents the network as nodes and channels between them -#[derive(Clone, PartialEq)] pub struct NetworkGraph { genesis_hash: BlockHash, - channels: BTreeMap, - nodes: BTreeMap, + // Lock order: channels -> nodes + channels: RwLock>, + nodes: RwLock>, } -/// A simple newtype for RwLockReadGuard<'a, NetworkGraph>. 
-/// This exists only to make accessing a RwLock possible from -/// the C bindings, as it can be done directly in Rust code. -pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>); +impl Clone for NetworkGraph { + fn clone(&self) -> Self { + let channels = self.channels.read().unwrap(); + let nodes = self.nodes.read().unwrap(); + Self { + genesis_hash: self.genesis_hash.clone(), + channels: RwLock::new(channels.clone()), + nodes: RwLock::new(nodes.clone()), + } + } +} + +/// A read-only view of [`NetworkGraph`]. +pub struct ReadOnlyNetworkGraph<'a> { + channels: RwLockReadGuard<'a, BTreeMap>, + nodes: RwLockReadGuard<'a, BTreeMap>, +} + +/// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion +/// return packet by a node along the route. See [BOLT #4] for details. +/// +/// [BOLT #4]: https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md +#[derive(Clone, Debug, PartialEq)] +pub enum NetworkUpdate { + /// An error indicating a `channel_update` messages should be applied via + /// [`NetworkGraph::update_channel`]. + ChannelUpdateMessage { + /// The update to apply via [`NetworkGraph::update_channel`]. + msg: ChannelUpdate, + }, + /// An error indicating only that a channel has been closed, which should be applied via + /// [`NetworkGraph::close_channel_from_update`]. + ChannelClosed { + /// The short channel id of the closed channel. + short_channel_id: u64, + /// Whether the channel should be permanently removed or temporarily disabled until a new + /// `channel_update` message is received. + is_permanent: bool, + }, + /// An error indicating only that a node has failed, which should be applied via + /// [`NetworkGraph::fail_node`]. + NodeFailure { + /// The node id of the failed node. + node_id: PublicKey, + /// Whether the node should be permanently removed from consideration or can be restored + /// when a new `channel_update` message is received. + is_permanent: bool, + } +} + +impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate, + (0, ChannelUpdateMessage) => { + (0, msg, required), + }, + (2, ChannelClosed) => { + (0, short_channel_id, required), + (2, is_permanent, required), + }, + (4, NodeFailure) => { + (0, node_id, required), + (2, is_permanent, required), + }, +); + +impl, C: Deref, L: Deref> EventHandler for NetGraphMsgHandler +where C::Target: chain::Access, L::Target: Logger { + fn handle_event(&self, event: &Event) { + if let Event::PaymentPathFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event { + if let Some(network_update) = network_update { + self.handle_network_update(network_update); + } + } + } +} /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. /// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. -pub struct NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { +/// +/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentPathFailed`] to the +/// [`NetworkGraph`]. 
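As an aside for readers of the new handler type that follows, a minimal usage sketch of the reworked API, mirroring the tests added later in this diff: the caller now owns the NetworkGraph and the handler only borrows it, and the same handler doubles as the EventHandler that applies NetworkUpdates from failed payment paths. This is illustrative only, not part of the patch; `logger` stands in for any util::logger::Logger implementation, and no chain::Access source is supplied.

    let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
    let network_graph = NetworkGraph::new(genesis_hash);
    // No chain::Access source, so announced funding UTXOs are not checked on-chain.
    let handler: NetGraphMsgHandler<&NetworkGraph, Arc<dyn chain::Access>, _> =
        NetGraphMsgHandler::new(&network_graph, None, &logger);
    // Payment-path failures can now be fed straight back into the shared graph:
    // handler.handle_event(&Event::PaymentPathFailed { .. });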
+pub struct NetGraphMsgHandler, C: Deref, L: Deref> +where C::Target: chain::Access, L::Target: Logger +{ secp_ctx: Secp256k1, - /// Representation of the payment channel network - pub network_graph: RwLock, + network_graph: G, chain_access: Option, full_syncs_requested: AtomicUsize, pending_events: Mutex>, logger: L, } -impl NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { +impl, C: Deref, L: Deref> NetGraphMsgHandler +where C::Target: chain::Access, L::Target: Logger +{ /// Creates a new tracker of the actual state of the network of channels and nodes, - /// assuming a fresh network graph. + /// assuming an existing Network Graph. /// Chain monitor is used to make sure announced channels exist on-chain, /// channel data is correct, and that the announcement is signed with /// channel owners' keys. - pub fn new(genesis_hash: BlockHash, chain_access: Option, logger: L) -> Self { + pub fn new(network_graph: G, chain_access: Option, logger: L) -> Self { NetGraphMsgHandler { secp_ctx: Secp256k1::verification_only(), - network_graph: RwLock::new(NetworkGraph::new(genesis_hash)), - full_syncs_requested: AtomicUsize::new(0), - chain_access, - pending_events: Mutex::new(vec![]), - logger, - } - } - - /// Creates a new tracker of the actual state of the network of channels and nodes, - /// assuming an existing Network Graph. - pub fn from_net_graph(chain_access: Option, logger: L, network_graph: NetworkGraph) -> Self { - NetGraphMsgHandler { - secp_ctx: Secp256k1::verification_only(), - network_graph: RwLock::new(network_graph), + network_graph, full_syncs_requested: AtomicUsize::new(0), chain_access, pending_events: Mutex::new(vec![]), @@ -114,14 +242,6 @@ impl NetGraphMsgHandler where C::Target: chain::Access self.chain_access = chain_access; } - /// Take a read lock on the network_graph and return it in the C-bindings - /// newtype helper. This is likely only useful when called via the C - /// bindings as you can call `self.network_graph.read().unwrap()` in Rust - /// yourself. - pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> { - LockedNetworkGraph(self.network_graph.read().unwrap()) - } - /// Returns true when a full routing table sync should be performed with a peer. fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { //TODO: Determine whether to request a full sync based on the network map. @@ -133,16 +253,31 @@ impl NetGraphMsgHandler where C::Target: chain::Access false } } -} -impl<'a> LockedNetworkGraph<'a> { - /// Get a reference to the NetworkGraph which this read-lock contains. - pub fn graph(&self) -> &NetworkGraph { - &*self.0 + /// Applies changes to the [`NetworkGraph`] from the given update. + fn handle_network_update(&self, update: &NetworkUpdate) { + match *update { + NetworkUpdate::ChannelUpdateMessage { ref msg } => { + let short_channel_id = msg.contents.short_channel_id; + let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1); + let status = if is_enabled { "enabled" } else { "disabled" }; + log_debug!(self.logger, "Updating channel with channel_update from a payment failure. 
Channel {} is {}.", short_channel_id, status); + let _ = self.network_graph.update_channel(msg, &self.secp_ctx); + }, + NetworkUpdate::ChannelClosed { short_channel_id, is_permanent } => { + let action = if is_permanent { "Removing" } else { "Disabling" }; + log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", action, short_channel_id); + self.network_graph.close_channel_from_update(short_channel_id, is_permanent); + }, + NetworkUpdate::NodeFailure { ref node_id, is_permanent } => { + let action = if is_permanent { "Removing" } else { "Disabling" }; + log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", action, node_id); + self.network_graph.fail_node(node_id, is_permanent); + }, + } } } - macro_rules! secp_verify_sig { ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => { match $secp_ctx.verify($msg, $sig, $pubkey) { @@ -152,43 +287,31 @@ macro_rules! secp_verify_sig { }; } -impl RoutingMessageHandler for NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger { +impl, C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler +where C::Target: chain::Access, L::Target: Logger +{ fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result { - self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?; + self.network_graph.update_node_from_announcement(msg, &self.secp_ctx)?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && msg.contents.excess_data.len() + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { - self.network_graph.write().unwrap().update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?; - log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }); + self.network_graph.update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?; + log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" 
} else { "" }); Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } - fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) { - match update { - &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => { - let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx); - }, - &msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => { - self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent); - }, - &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => { - self.network_graph.write().unwrap().fail_node(node_id, is_permanent); - }, - } - } - fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result { - self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx)?; + self.network_graph.update_channel(msg, &self.secp_ctx)?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { - let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); - let mut iter = network_graph.get_channels().range(starting_point..); + let channels = self.network_graph.channels.read().unwrap(); + let mut iter = channels.range(starting_point..); while result.len() < batch_amount as usize { if let Some((_, ref chan)) = iter.next() { if chan.announcement_message.is_some() { @@ -214,14 +337,14 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh } fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { - let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); + let nodes = self.network_graph.nodes.read().unwrap(); let mut iter = if let Some(pubkey) = starting_point { - let mut iter = network_graph.get_nodes().range((*pubkey)..); + let mut iter = nodes.range(NodeId::from_pubkey(pubkey)..); iter.next(); iter } else { - network_graph.get_nodes().range(..) + nodes.range::(..) }; while result.len() < batch_amount as usize { if let Some((_, ref node)) = iter.next() { @@ -254,7 +377,7 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh } // Check if we need to perform a full synchronization with this peer - if !self.should_request_full_sync(their_node_id) { + if !self.should_request_full_sync(&their_node_id) { return (); } @@ -265,7 +388,7 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh pending_events.push(MessageSendEvent::SendChannelRangeQuery { node_id: their_node_id.clone(), msg: QueryChannelRange { - chain_hash: self.network_graph.read().unwrap().genesis_hash, + chain_hash: self.network_graph.genesis_hash, first_blocknum, number_of_blocks, }, @@ -327,8 +450,6 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> { log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks); - let network_graph = self.network_graph.read().unwrap(); - let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0); // We might receive valid queries with end_blocknum that would overflow SCID conversion. 
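For readers of the SCID range handling that follows, a quick restatement of the short channel id layout the `scid_from_parts`/`block_from_scid` helpers work with (per BOLT 7: 3 bytes block height, 3 bytes transaction index, 2 bytes output index). The arithmetic below is an illustrative sketch, not part of the patch:

    let (block, tx_idx, vout) = (100_000u64, 0u64, 0u64);
    let scid = (block << 40) | (tx_idx << 16) | vout;
    // The block component is recovered by shifting it back down, which is all
    // block_from_scid() has to do:
    assert_eq!(scid >> 40, block);
    // Roughly equivalent to the helpers used in this file:
    //   scid_from_parts(block, tx_idx, vout) builds `scid`
    //   block_from_scid(&scid) recovers `block`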
@@ -336,7 +457,7 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0); // Per spec, we must reply to a query. Send an empty message when things are invalid. - if msg.chain_hash != network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { + if msg.chain_hash != self.network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@ -358,7 +479,8 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh // (has at least one update). A peer may still want to know the channel // exists even if its not yet routable. let mut batches: Vec> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)]; - for (_, ref chan) in network_graph.get_channels().range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) { + let channels = self.network_graph.channels.read().unwrap(); + for (_, ref chan) in channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) { if let Some(chan_announcement) = &chan.announcement_message { // Construct a new batch if last one is full if batches.last().unwrap().len() == batches.last().unwrap().capacity() { @@ -369,27 +491,43 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh batch.push(chan_announcement.contents.short_channel_id); } } - drop(network_graph); + drop(channels); let mut pending_events = self.pending_events.lock().unwrap(); let batch_count = batches.len(); + let mut prev_batch_endblock = msg.first_blocknum; for (batch_index, batch) in batches.into_iter().enumerate() { - // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and subsequent - // must be >= the prior reply. We'll simplify this by using zero since its still spec compliant and - // sequence completion is now explicitly. - let first_blocknum = 0; - - // Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the - // query's value. Prior batches must use the number of blocks that fit into the message. We'll - // base this off the last SCID in the batch since we've somewhat abusing first_blocknum. - let number_of_blocks = if batch_index == batch_count-1 { - msg.end_blocknum() - } else { - block_from_scid(batch.last().unwrap()) + 1 + // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum` + // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`. + // + // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each + // reply is >= the previous reply's `first_blocknum` and either exactly the previous + // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a + // significant diversion from the requirements set by the spec, and, in case of blocks + // with no channel opens (e.g. empty blocks), requires that we use the previous value + // and *not* derive the first_blocknum from the actual first block of the reply. + let first_blocknum = prev_batch_endblock; + + // Each message carries the number of blocks (from the `first_blocknum`) its contents + // fit in. Though there is no requirement that we use exactly the number of blocks its + // contents are from, except for the bogus requirements c-lightning enforces, above. 
+ // + // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be + // >= the query's end block. Thus, for the last reply, we calculate the difference + // between the query's end block and the start of the reply. + // + // Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and + // first_blocknum will be either msg.first_blocknum or a higher block height. + let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 { + (true, msg.end_blocknum() - first_blocknum) + } + // Prior replies should use the number of blocks that fit into the reply. Overflow + // safe since first_blocknum is always <= last SCID's block. + else { + (false, block_from_scid(batch.last().unwrap()) - first_blocknum) }; - // Only true for the last message in a sequence - let sync_complete = batch_index == batch_count - 1; + prev_batch_endblock = first_blocknum + number_of_blocks; pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@ -415,7 +553,7 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh } } -impl MessageSendEventsProvider for NetGraphMsgHandler +impl, C: Deref, L: Deref> MessageSendEventsProvider for NetGraphMsgHandler where C::Target: chain::Access, L::Target: Logger, @@ -476,11 +614,11 @@ pub struct ChannelInfo { /// Protocol features of a channel communicated during its announcement pub features: ChannelFeatures, /// Source node of the first direction of a channel - pub node_one: PublicKey, + pub node_one: NodeId, /// Details about the first direction of a channel pub one_to_two: Option, /// Source node of the second direction of a channel - pub node_two: PublicKey, + pub node_two: NodeId, /// Details about the second direction of a channel pub two_to_one: Option, /// The channel capacity as seen on-chain, if chain lookup is available. 
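To make the first_blocknum/number_of_blocks sequencing in handle_query_channel_range above concrete, here is the arithmetic for one of the test vectors updated at the bottom of this diff (a query which, judging by the expected replies, starts at block 100002 and asks for 8000 blocks, with one channel per block through 108001 plus a second channel in block 108001). This is only a worked restatement of the code above:

    let (query_first, query_blocks) = (100_002u32, 8_000u32);
    let query_end = query_first + query_blocks;                          // 108002
    // First (non-final) reply: its last SCID sits in block 108001, so it spans
    // block_from_scid(last) - first_blocknum blocks from the query start.
    let reply1 = (query_first, 108_001 - query_first);                   // (100002, 7999)
    let prev_batch_endblock = reply1.0 + reply1.1;                       // 108001
    // The final reply must reach at least the query's end block.
    let reply2 = (prev_batch_endblock, query_end - prev_batch_endblock); // (108001, 1)
    assert_eq!((reply1, reply2), ((100_002, 7_999), (108_001, 1)));

Note that each reply's first_blocknum equals the previous reply's end block, which is exactly the c-lightning < 0.10 sequencing constraint described in the comments above.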
@@ -495,7 +633,7 @@ pub struct ChannelInfo { impl fmt::Display for ChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}", - log_bytes!(self.features.encode()), log_pubkey!(self.node_one), self.one_to_two, log_pubkey!(self.node_two), self.two_to_one)?; + log_bytes!(self.features.encode()), log_bytes!(self.node_one.as_slice()), self.one_to_two, log_bytes!(self.node_two.as_slice()), self.two_to_one)?; Ok(()) } } @@ -512,7 +650,7 @@ impl_writeable_tlv_based!(ChannelInfo, { /// Fees for routing via a given channel or a node -#[derive(Eq, PartialEq, Copy, Clone, Debug)] +#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] pub struct RoutingFees { /// Flat routing fee in satoshis pub base_msat: u32, @@ -591,17 +729,19 @@ const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; impl Writeable for NetworkGraph { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); self.genesis_hash.write(writer)?; - (self.channels.len() as u64).write(writer)?; - for (ref chan_id, ref chan_info) in self.channels.iter() { + let channels = self.channels.read().unwrap(); + (channels.len() as u64).write(writer)?; + for (ref chan_id, ref chan_info) in channels.iter() { (*chan_id).write(writer)?; chan_info.write(writer)?; } - (self.nodes.len() as u64).write(writer)?; - for (ref node_id, ref node_info) in self.nodes.iter() { + let nodes = self.nodes.read().unwrap(); + (nodes.len() as u64).write(writer)?; + for (ref node_id, ref node_info) in nodes.iter() { node_id.write(writer)?; node_info.write(writer)?; } @@ -612,7 +752,7 @@ impl Writeable for NetworkGraph { } impl Readable for NetworkGraph { - fn read(reader: &mut R) -> Result { + fn read(reader: &mut R) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let genesis_hash: BlockHash = Readable::read(reader)?; @@ -634,8 +774,8 @@ impl Readable for NetworkGraph { Ok(NetworkGraph { genesis_hash, - channels, - nodes, + channels: RwLock::new(channels), + nodes: RwLock::new(nodes), }) } } @@ -643,47 +783,42 @@ impl Readable for NetworkGraph { impl fmt::Display for NetworkGraph { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { writeln!(f, "Network map\n[Channels]")?; - for (key, val) in self.channels.iter() { + for (key, val) in self.channels.read().unwrap().iter() { writeln!(f, " {}: {}", key, val)?; } writeln!(f, "[Nodes]")?; - for (key, val) in self.nodes.iter() { - writeln!(f, " {}: {}", log_pubkey!(key), val)?; + for (&node_id, val) in self.nodes.read().unwrap().iter() { + writeln!(f, " {}: {}", log_bytes!(node_id.as_slice()), val)?; } Ok(()) } } -impl NetworkGraph { - /// Returns all known valid channels' short ids along with announced channel info. - /// - /// (C-not exported) because we have no mapping for `BTreeMap`s - pub fn get_channels<'a>(&'a self) -> &'a BTreeMap { &self.channels } - /// Returns all known nodes' public keys along with announced node info. - /// - /// (C-not exported) because we have no mapping for `BTreeMap`s - pub fn get_nodes<'a>(&'a self) -> &'a BTreeMap { &self.nodes } - - /// Get network addresses by node id. - /// Returns None if the requested node is completely unknown, - /// or if node announcement for the node was never received. 
- /// - /// (C-not exported) as there is no practical way to track lifetimes of returned values. - pub fn get_addresses<'a>(&'a self, pubkey: &PublicKey) -> Option<&'a Vec> { - if let Some(node) = self.nodes.get(pubkey) { - if let Some(node_info) = node.announcement_info.as_ref() { - return Some(&node_info.addresses) - } - } - None +impl PartialEq for NetworkGraph { + fn eq(&self, other: &Self) -> bool { + self.genesis_hash == other.genesis_hash && + *self.channels.read().unwrap() == *other.channels.read().unwrap() && + *self.nodes.read().unwrap() == *other.nodes.read().unwrap() } +} +impl NetworkGraph { /// Creates a new, empty, network graph. pub fn new(genesis_hash: BlockHash) -> NetworkGraph { Self { genesis_hash, - channels: BTreeMap::new(), - nodes: BTreeMap::new(), + channels: RwLock::new(BTreeMap::new()), + nodes: RwLock::new(BTreeMap::new()), + } + } + + /// Returns a read-only view of the network graph. + pub fn read_only(&'_ self) -> ReadOnlyNetworkGraph<'_> { + let channels = self.channels.read().unwrap(); + let nodes = self.nodes.read().unwrap(); + ReadOnlyNetworkGraph { + channels, + nodes, } } @@ -693,7 +828,7 @@ impl NetworkGraph { /// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. - pub fn update_node_from_announcement(&mut self, msg: &msgs::NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement, secp_ctx: &Secp256k1) -> Result<(), LightningError> { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id); self.update_node_from_announcement_intern(&msg.contents, Some(&msg)) @@ -703,17 +838,22 @@ impl NetworkGraph { /// given node announcement without verifying the associated signatures. Because we aren't /// given the associated signatures here we cannot relay the node announcement to any of our /// peers. - pub fn update_node_from_unsigned_announcement(&mut self, msg: &msgs::UnsignedNodeAnnouncement) -> Result<(), LightningError> { + pub fn update_node_from_unsigned_announcement(&self, msg: &msgs::UnsignedNodeAnnouncement) -> Result<(), LightningError> { self.update_node_from_announcement_intern(msg, None) } - fn update_node_from_announcement_intern(&mut self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> { - match self.nodes.get_mut(&msg.node_id) { + fn update_node_from_announcement_intern(&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> { + match self.nodes.write().unwrap().get_mut(&NodeId::from_pubkey(&msg.node_id)) { None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}), Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { - if node_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + // The timestamp field is somewhat of a misnomer - the BOLTs use it to order + // updates to ensure you always have the latest one, only vaguely suggesting + // that it be at least the current time. 
+ if node_info.last_update > msg.timestamp { + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)}); + } else if node_info.last_update == msg.timestamp { + return Err(LightningError{err: "Update had the same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } } @@ -743,10 +883,12 @@ impl NetworkGraph { /// /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. - pub fn update_channel_from_announcement - (&mut self, msg: &msgs::ChannelAnnouncement, chain_access: &Option, secp_ctx: &Secp256k1) - -> Result<(), LightningError> - where C::Target: chain::Access { + pub fn update_channel_from_announcement( + &self, msg: &msgs::ChannelAnnouncement, chain_access: &Option, secp_ctx: &Secp256k1 + ) -> Result<(), LightningError> + where + C::Target: chain::Access, + { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2); @@ -761,17 +903,21 @@ impl NetworkGraph { /// /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. - pub fn update_channel_from_unsigned_announcement - (&mut self, msg: &msgs::UnsignedChannelAnnouncement, chain_access: &Option) - -> Result<(), LightningError> - where C::Target: chain::Access { + pub fn update_channel_from_unsigned_announcement( + &self, msg: &msgs::UnsignedChannelAnnouncement, chain_access: &Option + ) -> Result<(), LightningError> + where + C::Target: chain::Access, + { self.update_channel_from_unsigned_announcement_intern(msg, None, chain_access) } - fn update_channel_from_unsigned_announcement_intern - (&mut self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option) - -> Result<(), LightningError> - where C::Target: chain::Access { + fn update_channel_from_unsigned_announcement_intern( + &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option + ) -> Result<(), LightningError> + where + C::Target: chain::Access, + { if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } @@ -808,16 +954,18 @@ impl NetworkGraph { let chan_info = ChannelInfo { features: msg.features.clone(), - node_one: msg.node_id_1.clone(), + node_one: NodeId::from_pubkey(&msg.node_id_1), one_to_two: None, - node_two: msg.node_id_2.clone(), + node_two: NodeId::from_pubkey(&msg.node_id_2), two_to_one: None, capacity_sats: utxo_value, announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { full_msg.cloned() } else { None }, }; - match self.channels.entry(msg.short_channel_id) { + let mut channels = self.channels.write().unwrap(); + let mut nodes = self.nodes.write().unwrap(); + match channels.entry(msg.short_channel_id) { BtreeEntry::Occupied(mut entry) => { //TODO: because asking the blockchain if short_channel_id is valid is only optional //in the blockchain API, we need to handle it smartly here, though it's unclear @@ -831,10 +979,10 @@ impl NetworkGraph { // b) we 
don't track UTXOs of channels we know about and remove them if they // get reorg'd out. // c) it's unclear how to do so without exposing ourselves to massive DoS risk. - Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id); + Self::remove_channel_in_nodes(&mut nodes, &entry.get(), msg.short_channel_id); *entry.get_mut() = chan_info; } else { - return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError}) + return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } }, BtreeEntry::Vacant(entry) => { @@ -844,7 +992,7 @@ impl NetworkGraph { macro_rules! add_channel_to_node { ( $node_id: expr ) => { - match self.nodes.entry($node_id) { + match nodes.entry($node_id) { BtreeEntry::Occupied(node_entry) => { node_entry.into_mut().channels.push(msg.short_channel_id); }, @@ -859,8 +1007,8 @@ impl NetworkGraph { }; } - add_channel_to_node!(msg.node_id_1); - add_channel_to_node!(msg.node_id_2); + add_channel_to_node!(NodeId::from_pubkey(&msg.node_id_1)); + add_channel_to_node!(NodeId::from_pubkey(&msg.node_id_2)); Ok(()) } @@ -869,13 +1017,15 @@ impl NetworkGraph { /// If permanent, removes a channel from the local storage. /// May cause the removal of nodes too, if this was their last channel. /// If not permanent, makes channels unavailable for routing. - pub fn close_channel_from_update(&mut self, short_channel_id: u64, is_permanent: bool) { + pub fn close_channel_from_update(&self, short_channel_id: u64, is_permanent: bool) { + let mut channels = self.channels.write().unwrap(); if is_permanent { - if let Some(chan) = self.channels.remove(&short_channel_id) { - Self::remove_channel_in_nodes(&mut self.nodes, &chan, short_channel_id); + if let Some(chan) = channels.remove(&short_channel_id) { + let mut nodes = self.nodes.write().unwrap(); + Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); } } else { - if let Some(chan) = self.channels.get_mut(&short_channel_id) { + if let Some(chan) = channels.get_mut(&short_channel_id) { if let Some(one_to_two) = chan.one_to_two.as_mut() { one_to_two.enabled = false; } @@ -886,7 +1036,8 @@ impl NetworkGraph { } } - fn fail_node(&mut self, _node_id: &PublicKey, is_permanent: bool) { + /// Marks a node in the graph as failed. + pub fn fail_node(&self, _node_id: &PublicKey, is_permanent: bool) { if is_permanent { // TODO: Wholly remove the node } else { @@ -900,23 +1051,24 @@ impl NetworkGraph { /// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept /// routing messages from a source using a protocol other than the lightning P2P protocol. - pub fn update_channel(&mut self, msg: &msgs::ChannelUpdate, secp_ctx: &Secp256k1) -> Result<(), LightningError> { + pub fn update_channel(&self, msg: &msgs::ChannelUpdate, secp_ctx: &Secp256k1) -> Result<(), LightningError> { self.update_channel_intern(&msg.contents, Some(&msg), Some((&msg.signature, secp_ctx))) } /// For an already known (from announcement) channel, update info about one of the directions /// of the channel without verifying the associated signatures. Because we aren't given the /// associated signatures here we cannot relay the channel update to any of our peers. 
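As a small illustration of the two close_channel_from_update modes above (mirroring the handling_network_update test later in this diff, and assuming `network_graph` already contains the announced channel plus one applied channel_update for it):

    // Non-permanent: the channel stays in the graph but its known directions are
    // marked disabled for routing.
    network_graph.close_channel_from_update(short_channel_id, false);
    let enabled = network_graph.read_only().channels()
        .get(&short_channel_id)
        .and_then(|chan| chan.one_to_two.as_ref().map(|dir| dir.enabled));
    assert_eq!(enabled, Some(false));

    // Permanent: the channel is removed, along with any nodes left without channels.
    network_graph.close_channel_from_update(short_channel_id, true);
    assert!(network_graph.read_only().channels().get(&short_channel_id).is_none());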
- pub fn update_channel_unsigned(&mut self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { + pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> { self.update_channel_intern(msg, None, None::<(&secp256k1::Signature, &Secp256k1)>) } - fn update_channel_intern(&mut self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::Signature, &Secp256k1)>) -> Result<(), LightningError> { + fn update_channel_intern(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::Signature, &Secp256k1)>) -> Result<(), LightningError> { let dest_node_id; let chan_enabled = msg.flags & (1 << 1) != (1 << 1); let chan_was_enabled; - match self.channels.get_mut(&msg.short_channel_id) { + let mut channels = self.channels.write().unwrap(); + match channels.get_mut(&msg.short_channel_id) { None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}), Some(channel) => { if let OptionalField::Present(htlc_maximum_msat) = msg.htlc_maximum_msat { @@ -935,8 +1087,16 @@ impl NetworkGraph { macro_rules! maybe_update_channel_info { ( $target: expr, $src_node: expr) => { if let Some(existing_chan_info) = $target.as_ref() { - if existing_chan_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + // The timestamp field is somewhat of a misnomer - the BOLTs use it to + // order updates to ensure you always have the latest one, only + // suggesting that it be at least the current time. For + // channel_updates specifically, the BOLTs discuss the possibility of + // pruning based on the timestamp field being more than two weeks old, + // but only in the non-normative section. 
+ if existing_chan_info.last_update > msg.timestamp { + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)}); + } else if existing_chan_info.last_update == msg.timestamp { + return Err(LightningError{err: "Update had same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } chan_was_enabled = existing_chan_info.enabled; } else { @@ -966,21 +1126,28 @@ impl NetworkGraph { if msg.flags & 1 == 1 { dest_node_id = channel.node_one.clone(); if let Some((sig, ctx)) = sig_info { - secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_two); + secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_two.as_slice()).map_err(|_| LightningError{ + err: "Couldn't parse source node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug) + })?); } maybe_update_channel_info!(channel.two_to_one, channel.node_two); } else { dest_node_id = channel.node_two.clone(); if let Some((sig, ctx)) = sig_info { - secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_one); + secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_one.as_slice()).map_err(|_| LightningError{ + err: "Couldn't parse destination node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug) + })?); } maybe_update_channel_info!(channel.one_to_two, channel.node_one); } } } + let mut nodes = self.nodes.write().unwrap(); if chan_enabled { - let node = self.nodes.get_mut(&dest_node_id).unwrap(); + let node = nodes.get_mut(&dest_node_id).unwrap(); let mut base_msat = msg.fee_base_msat; let mut proportional_millionths = msg.fee_proportional_millionths; if let Some(fees) = node.lowest_inbound_channel_fees { @@ -992,11 +1159,11 @@ impl NetworkGraph { proportional_millionths }); } else if chan_was_enabled { - let node = self.nodes.get_mut(&dest_node_id).unwrap(); + let node = nodes.get_mut(&dest_node_id).unwrap(); let mut lowest_inbound_channel_fees = None; for chan_id in node.channels.iter() { - let chan = self.channels.get(chan_id).unwrap(); + let chan = channels.get(chan_id).unwrap(); let chan_info_opt; if chan.node_one == dest_node_id { chan_info_opt = chan.two_to_one.as_ref(); @@ -1019,7 +1186,7 @@ impl NetworkGraph { Ok(()) } - fn remove_channel_in_nodes(nodes: &mut BTreeMap, chan: &ChannelInfo, short_channel_id: u64) { + fn remove_channel_in_nodes(nodes: &mut BTreeMap, chan: &ChannelInfo, short_channel_id: u64) { macro_rules! remove_from_node { ($node_id: expr) => { if let BtreeEntry::Occupied(mut entry) = nodes.entry($node_id) { @@ -1040,18 +1207,47 @@ impl NetworkGraph { } } +impl ReadOnlyNetworkGraph<'_> { + /// Returns all known valid channels' short ids along with announced channel info. + /// + /// (C-not exported) because we have no mapping for `BTreeMap`s + pub fn channels(&self) -> &BTreeMap { + &*self.channels + } + + /// Returns all known nodes' public keys along with announced node info. + /// + /// (C-not exported) because we have no mapping for `BTreeMap`s + pub fn nodes(&self) -> &BTreeMap { + &*self.nodes + } + + /// Get network addresses by node id. + /// Returns None if the requested node is completely unknown, + /// or if node announcement for the node was never received. 
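Since the node map is now keyed by NodeId rather than PublicKey, lookups through the read-only accessors below go via NodeId::from_pubkey, with PublicKey::from_slice available to get a real key back out when one is needed. A small sketch, assuming a `network_graph` and a peer `pubkey` in scope:

    let graph = network_graph.read_only();
    let node_id = NodeId::from_pubkey(&pubkey);
    if let Some(node) = graph.nodes().get(&node_id) {
        for scid in node.channels.iter() {
            if let Some(chan) = graph.channels().get(scid) {
                // Take whichever side of the channel is not `pubkey` itself...
                let peer = if chan.node_one == node_id { &chan.node_two } else { &chan.node_one };
                // ...and convert back to a PublicKey only when one is actually needed.
                let _peer_key = PublicKey::from_slice(peer.as_slice());
            }
        }
    }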
+ pub fn get_addresses(&self, pubkey: &PublicKey) -> Option> { + if let Some(node) = self.nodes.get(&NodeId::from_pubkey(&pubkey)) { + if let Some(node_info) = node.announcement_info.as_ref() { + return Some(node_info.addresses.clone()) + } + } + None + } +} + #[cfg(test)] mod tests { use chain; + use ln::PaymentHash; use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; - use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, MAX_EXCESS_BYTES_FOR_RELAY}; + use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NetworkUpdate, MAX_EXCESS_BYTES_FOR_RELAY}; use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, - UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate, + UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT}; use util::test_utils; use util::logger::Logger; use util::ser::{Readable, Writeable}; - use util::events::{MessageSendEvent, MessageSendEventsProvider}; + use util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider}; use util::scid_utils::scid_from_parts; use bitcoin::hashes::sha256d::Hash as Sha256dHash; @@ -1067,20 +1263,28 @@ mod tests { use bitcoin::secp256k1::key::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; + use io; use prelude::*; - use std::sync::Arc; + use sync::Arc; + + fn create_network_graph() -> NetworkGraph { + let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); + NetworkGraph::new(genesis_hash) + } - fn create_net_graph_msg_handler() -> (Secp256k1, NetGraphMsgHandler, Arc>) { + fn create_net_graph_msg_handler(network_graph: &NetworkGraph) -> ( + Secp256k1, NetGraphMsgHandler<&NetworkGraph, Arc, Arc> + ) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); - let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); - let net_graph_msg_handler = NetGraphMsgHandler::new(genesis_hash, None, Arc::clone(&logger)); + let net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, None, Arc::clone(&logger)); (secp_ctx, net_graph_msg_handler) } #[test] fn request_full_sync_finite_times() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); @@ -1093,7 +1297,8 @@ mod tests { #[test] fn handling_node_announcements() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); @@ -1235,18 +1440,18 @@ mod tests { }; // Test if the UTXO lookups were not supported - let mut net_graph_msg_handler = NetGraphMsgHandler::new(genesis_block(Network::Testnet).header.block_hash(), None, Arc::clone(&logger)); + let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash()); + let mut net_graph_msg_handler = 
NetGraphMsgHandler::new(&network_graph, None, Arc::clone(&logger)); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() }; { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - match network.get_channels().get(&unsigned_announcement.short_channel_id) { + match network_graph.read_only().channels().get(&unsigned_announcement.short_channel_id) { None => panic!(), Some(_) => () - } + }; } // If we receive announcement for the same channel (with UTXO lookups disabled), @@ -1259,7 +1464,8 @@ mod tests { // Test if an associated transaction were not on-chain (or not confirmed). let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx); - net_graph_msg_handler = NetGraphMsgHandler::new(chain_source.clone().genesis_hash, Some(chain_source.clone()), Arc::clone(&logger)); + let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash()); + net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), Arc::clone(&logger)); unsigned_announcement.short_channel_id += 1; msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); @@ -1294,11 +1500,10 @@ mod tests { }; { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - match network.get_channels().get(&unsigned_announcement.short_channel_id) { + match network_graph.read_only().channels().get(&unsigned_announcement.short_channel_id) { None => panic!(), Some(_) => () - } + }; } // If we receive announcement for the same channel (but TX is not confirmed), @@ -1325,13 +1530,12 @@ mod tests { _ => panic!() }; { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - match network.get_channels().get(&unsigned_announcement.short_channel_id) { + match network_graph.read_only().channels().get(&unsigned_announcement.short_channel_id) { Some(channel_entry) => { assert_eq!(channel_entry.features, ChannelFeatures::empty()); }, _ => panic!() - } + }; } // Don't relay valid channels with excess data @@ -1383,7 +1587,8 @@ mod tests { let secp_ctx = Secp256k1::new(); let logger: Arc = Arc::new(test_utils::TestLogger::new()); let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); - let net_graph_msg_handler = NetGraphMsgHandler::new(genesis_block(Network::Testnet).header.block_hash(), Some(chain_source.clone()), Arc::clone(&logger)); + let network_graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash()); + let net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), Arc::clone(&logger)); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); @@ -1455,14 +1660,13 @@ mod tests { }; { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - match network.get_channels().get(&short_channel_id) { + match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144); assert!(channel_info.two_to_one.is_none()); } - } + }; } unsigned_channel_update.timestamp += 100; @@ -1529,7 +1733,7 @@ mod tests { match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => panic!(), - Err(e) => assert_eq!(e.err, "Update older than last processed update") + Err(e) => assert_eq!(e.err, 
"Update had same timestamp as last processed update") }; unsigned_channel_update.timestamp += 500; @@ -1547,8 +1751,14 @@ mod tests { } #[test] - fn handling_htlc_fail_channel_update() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + fn handling_network_update() { + let logger = test_utils::TestLogger::new(); + let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); + let genesis_hash = genesis_block(Network::Testnet).header.block_hash(); + let network_graph = NetworkGraph::new(genesis_hash); + let net_graph_msg_handler = NetGraphMsgHandler::new(&network_graph, Some(chain_source.clone()), &logger); + let secp_ctx = Secp256k1::new(); + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); @@ -1561,8 +1771,7 @@ mod tests { { // There is no nodes in the table at the beginning. - let network = net_graph_msg_handler.network_graph.read().unwrap(); - assert_eq!(network.get_nodes().len(), 0); + assert_eq!(network_graph.read_only().nodes().len(), 0); } { @@ -1586,10 +1795,9 @@ mod tests { bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; - match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { - Ok(_) => (), - Err(_) => panic!() - }; + let chain_source: Option<&test_utils::TestChainSource> = None; + assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source, &secp_ctx).is_ok()); + assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); let unsigned_channel_update = UnsignedChannelUpdate { chain_hash, @@ -1609,61 +1817,88 @@ mod tests { contents: unsigned_channel_update.clone() }; - match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { - Ok(res) => assert!(res), - _ => panic!() - }; + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); + + net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { + payment_id: None, + payment_hash: PaymentHash([0; 32]), + rejected_by_dest: false, + all_paths_failed: true, + path: vec![], + network_update: Some(NetworkUpdate::ChannelUpdateMessage { + msg: valid_channel_update, + }), + short_channel_id: None, + retry: None, + error_code: None, + error_data: None, + }); + + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); } // Non-permanent closing just disables a channel { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - match network.get_channels().get(&short_channel_id) { + match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { - assert!(channel_info.one_to_two.is_some()); + assert!(channel_info.one_to_two.as_ref().unwrap().enabled); } - } - } - - let channel_close_msg = HTLCFailChannelUpdate::ChannelClosed { - short_channel_id, - is_permanent: false - }; + }; - net_graph_msg_handler.handle_htlc_fail_channel_update(&channel_close_msg); + net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { + payment_id: None, + payment_hash: PaymentHash([0; 32]), + rejected_by_dest: false, + all_paths_failed: true, + path: vec![], + network_update: Some(NetworkUpdate::ChannelClosed { + short_channel_id, + is_permanent: false, + }), + short_channel_id: None, + retry: None, + error_code: None, + error_data: None, + }); - // 
Non-permanent closing just disables a channel - { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - match network.get_channels().get(&short_channel_id) { + match network_graph.read_only().channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { assert!(!channel_info.one_to_two.as_ref().unwrap().enabled); } - } + }; } - let channel_close_msg = HTLCFailChannelUpdate::ChannelClosed { - short_channel_id, - is_permanent: true - }; - - net_graph_msg_handler.handle_htlc_fail_channel_update(&channel_close_msg); - // Permanent closing deletes a channel { - let network = net_graph_msg_handler.network_graph.read().unwrap(); - assert_eq!(network.get_channels().len(), 0); + net_graph_msg_handler.handle_event(&Event::PaymentPathFailed { + payment_id: None, + payment_hash: PaymentHash([0; 32]), + rejected_by_dest: false, + all_paths_failed: true, + path: vec![], + network_update: Some(NetworkUpdate::ChannelClosed { + short_channel_id, + is_permanent: true, + }), + short_channel_id: None, + retry: None, + error_code: None, + error_data: None, + }); + + assert_eq!(network_graph.read_only().channels().len(), 0); // Nodes are also deleted because there are no associated channels anymore - assert_eq!(network.get_nodes().len(), 0); + assert_eq!(network_graph.read_only().nodes().len(), 0); } - // TODO: Test HTLCFailChannelUpdate::NodeFailure, which is not implemented yet. + // TODO: Test NetworkUpdate::NodeFailure, which is not implemented yet. } #[test] fn getting_next_channel_announcements() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); @@ -1797,7 +2032,8 @@ mod tests { #[test] fn getting_next_node_announcements() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); @@ -1914,7 +2150,8 @@ mod tests { #[test] fn network_graph_serialization() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); @@ -1971,17 +2208,17 @@ mod tests { Err(_) => panic!() }; - let network = net_graph_msg_handler.network_graph.write().unwrap(); let mut w = test_utils::TestVecWriter(Vec::new()); - assert!(!network.get_nodes().is_empty()); - assert!(!network.get_channels().is_empty()); - network.write(&mut w).unwrap(); - assert!(::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network); + assert!(!network_graph.read_only().nodes().is_empty()); + assert!(!network_graph.read_only().channels().is_empty()); + network_graph.write(&mut w).unwrap(); + assert!(::read(&mut io::Cursor::new(&w.0)).unwrap() == network_graph); } #[test] fn calling_sync_routing_table() { - let (secp_ctx, net_graph_msg_handler) = 
create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); @@ -2018,7 +2255,8 @@ mod tests { // The initial implementation allows syncing with the first 5 peers after // which should_request_full_sync will return false { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let init_msg = Init { features: InitFeatures::known() }; for n in 1..7 { let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap(); @@ -2037,7 +2275,8 @@ mod tests { #[test] fn handling_reply_channel_range() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1); @@ -2085,7 +2324,8 @@ mod tests { #[test] fn handling_reply_short_channel_ids() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey); @@ -2114,7 +2354,8 @@ mod tests { #[test] fn handling_query_channel_range() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let chain_hash = genesis_block(Network::Testnet).header.block_hash(); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); @@ -2235,8 +2476,8 @@ mod tests { vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 0x01000000, + first_blocknum: 0xffffff, + number_of_blocks: 1, sync_complete: true, short_channel_ids: vec![] }, @@ -2256,8 +2497,8 @@ mod tests { vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 2000, + first_blocknum: 1000, + number_of_blocks: 1000, sync_complete: true, short_channel_ids: vec![], } @@ -2277,8 +2518,8 @@ mod tests { vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 0xffffffff, + first_blocknum: 0xfe0000, + number_of_blocks: 0xffffffff - 0xfe0000, sync_complete: true, short_channel_ids: vec![ 0xfffffe_ffffff_ffff, // max @@ -2300,8 +2541,8 @@ mod tests { vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 108000, + first_blocknum: 100000, + number_of_blocks: 8000, sync_complete: true, short_channel_ids: (100000..=107999) .map(|block| scid_from_parts(block, 0, 0).unwrap()) @@ -2323,8 +2564,8 @@ mod tests { vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 108000, + first_blocknum: 100000, + number_of_blocks: 7999, sync_complete: false, short_channel_ids: (100000..=107999) .map(|block| scid_from_parts(block, 0, 0).unwrap()) @@ -2332,8 +2573,8 @@ mod tests { }, ReplyChannelRange { chain_hash: chain_hash.clone(), - 
first_blocknum: 0, - number_of_blocks: 108001, + first_blocknum: 107999, + number_of_blocks: 2, sync_complete: true, short_channel_ids: vec![ scid_from_parts(108000, 0, 0).unwrap(), @@ -2355,8 +2596,8 @@ mod tests { vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 108002, + first_blocknum: 100002, + number_of_blocks: 7999, sync_complete: false, short_channel_ids: (100002..=108001) .map(|block| scid_from_parts(block, 0, 0).unwrap()) @@ -2364,8 +2605,8 @@ mod tests { }, ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 108002, + first_blocknum: 108001, + number_of_blocks: 1, sync_complete: true, short_channel_ids: vec![ scid_from_parts(108001, 1, 0).unwrap(), @@ -2376,12 +2617,15 @@ mod tests { } fn do_handling_query_channel_range( - net_graph_msg_handler: &NetGraphMsgHandler, Arc>, + net_graph_msg_handler: &NetGraphMsgHandler<&NetworkGraph, Arc, Arc>, test_node_id: &PublicKey, msg: QueryChannelRange, expected_ok: bool, expected_replies: Vec ) { + let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1); + let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum; + let query_end_blocknum = msg.end_blocknum(); let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg); if expected_ok { @@ -2403,6 +2647,17 @@ mod tests { assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks); assert_eq!(msg.sync_complete, expected_reply.sync_complete); assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids); + + // Enforce exactly the sequencing requirements present on c-lightning v0.9.3 + assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1)); + assert!(msg.first_blocknum >= max_firstblocknum); + max_firstblocknum = msg.first_blocknum; + c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks); + + // Check that the last block count is >= the query's end_blocknum + if i == events.len() - 1 { + assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum); + } }, _ => panic!("expected MessageSendEvent::SendReplyChannelRange"), } @@ -2411,7 +2666,8 @@ mod tests { #[test] fn handling_query_short_channel_ids() { - let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); + let network_graph = create_network_graph(); + let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph); let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);