use util::ser::{Writeable, Readable, Writer};
use util::logger::Logger;
use util::events::{MessageSendEvent, MessageSendEventsProvider};
-use util::scid_utils::{block_from_scid, scid_from_parts};
+use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
-use std::{cmp, fmt};
+use core::{cmp, fmt};
use std::sync::{RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::collections::BTreeMap;
use std::collections::btree_map::Entry as BtreeEntry;
-use std::ops::Deref;
+use core::ops::Deref;
use bitcoin::hashes::hex::ToHex;
/// The maximum number of extra bytes which we do not understand in a gossip message before we will
/// refuse to relay the message.
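+/// (e.g. a node_announcement whose excess_data exceeds 1024 bytes is still applied to the
+/// graph, but is not relayed on to peers)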
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
+/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
+/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations.
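+/// (8,000 SCIDs at 8 bytes each encode to 64,000 bytes, leaving room under the 65,535-byte
+/// message limit for the reply's fixed-size fields.)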
+const MAX_SCIDS_PER_REPLY: usize = 8000;
+
/// Represents the network as nodes and channels between them
#[derive(Clone, PartialEq)]
pub struct NetworkGraph {
full_syncs_requested: AtomicUsize,
pending_events: Mutex<Vec<MessageSendEvent>>,
logger: L,
-
- /// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
- /// Default is 8000 which ensures a reply fits within the 65k payload limit and is
- /// consistent with other implementations.
- max_reply_scids: usize,
}
impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
chain_access,
pending_events: Mutex::new(vec![]),
logger,
- max_reply_scids: 8000,
}
}
chain_access,
pending_events: Mutex::new(vec![]),
logger,
- max_reply_scids: 8000,
}
}
};
}
-impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
+impl<C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?;
Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
Ok(())
}
- /// Processes a query from a peer by finding channels whose funding UTXOs
+ /// Processes a query from a peer by finding announced/public channels whose funding UTXOs
/// are in the specified block range. Due to message size limits, large range
/// queries may result in several reply messages. This implementation enqueues
- /// all reply messages into pending events.
+ /// all reply messages into pending events. Each message will allocate just under 65KiB. A full
+ /// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
+ /// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
+ /// memory-constrained systems.
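+ ///
+ /// For example, a query range containing 9,000 announced channels produces two replies: the
+ /// first carrying 8,000 SCIDs with sync_complete unset, the second carrying the remaining
+ /// 1,000 SCIDs with sync_complete set.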
fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> {
log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks);
let network_graph = self.network_graph.read().unwrap();
- let start_scid = scid_from_parts(msg.first_blocknum, 0, 0);
+ let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
- // We receive valid queries with end_blocknum that would overflow SCID conversion.
- // Manually cap the ending block to avoid this overflow.
- let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum(), 0xffffff), 0, 0);
+ // We might receive valid queries with end_blocknum that would overflow SCID conversion.
+ // If so, we manually cap the ending block to avoid this overflow.
+ let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
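+ // (A short_channel_id packs `block << 40 | tx_index << 16 | vout_index`, so the block height
+ // occupies the upper 24 bits and any block above MAX_SCID_BLOCK (0x00ffffff) is unrepresentable.)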
// Per spec, we must reply to a query. Send an empty message when things are invalid.
- if msg.chain_hash != network_graph.genesis_hash || start_scid.is_err() || exclusive_end_scid.is_err() {
+ if msg.chain_hash != network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 {
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(MessageSendEvent::SendReplyChannelRange {
node_id: their_node_id.clone(),
short_channel_ids: vec![],
}
});
- return Ok(());
+ return Err(LightningError {
+ err: String::from("query_channel_range could not be processed"),
+ action: ErrorAction::IgnoreError,
+ });
}
// Creates channel batches. We are not checking if the channel is routable
// (has at least one update). A peer may still want to know the channel
// exists even if it's not yet routable.
- let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(self.max_reply_scids)];
- for (_, ref chan) in network_graph.get_channels().range(start_scid.unwrap()..exclusive_end_scid.unwrap()) {
+ let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
+ for (_, ref chan) in network_graph.get_channels().range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
if let Some(chan_announcement) = &chan.announcement_message {
// Construct a new batch if last one is full
if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
- batches.push(Vec::with_capacity(self.max_reply_scids));
+ batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
}
let batch = batches.last_mut().unwrap();
drop(network_graph);
let mut pending_events = self.pending_events.lock().unwrap();
- let mut batch_index = 0;
let batch_count = batches.len();
- for batch in batches.into_iter() {
- // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum.
- // Use the query's values since we don't use pre-processed reply ranges.
- let first_blocknum = if batch_index == 0 {
- msg.first_blocknum
- }
- // Subsequent replies must be >= the last sent first_blocknum. Use the first block
- // in the new batch.
- else {
- block_from_scid(batch.first().unwrap())
- };
-
- // Per spec, the last end_block needs to be >= the query's end_block. Last
- // reply calculates difference between the query's end_blocknum and the start of the reply.
- // Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and first_blocknum
- // will be either msg.first_blocknum or a higher block height.
+ for (batch_index, batch) in batches.into_iter().enumerate() {
+ // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and
+ // subsequent replies must be >= the prior reply's first_blocknum. We'll simplify this by
+ // using zero, since it's still spec compliant and sequence completion is now signaled
+ // explicitly by the sync_complete flag.
+ let first_blocknum = 0;
+
+ // Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
+ // query's value. Prior batches must use the number of blocks that fit into the message. We'll
+ // base this on the last SCID in the batch, since we're somewhat abusing first_blocknum.
let number_of_blocks = if batch_index == batch_count-1 {
- msg.end_blocknum() - first_blocknum
- }
- // Prior replies should use the number of blocks that fit into the reply. Overflow
- // safe since first_blocknum is always <= last SCID's block.
- else {
- block_from_scid(batch.last().unwrap()) - first_blocknum + 1
+ msg.end_blocknum()
+ } else {
+ block_from_scid(batch.last().unwrap()) + 1
};
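+ // (e.g. with three batches whose first two end at SCIDs in blocks 108000 and 116000, the
+ // replies carry number_of_blocks of 108001, 116001, and finally msg.end_blocknum())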
// Only true for the last message in a sequence
short_channel_ids: batch,
}
});
-
- batch_index += 1;
}
Ok(())
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let mut ret = Vec::new();
let mut pending_events = self.pending_events.lock().unwrap();
- std::mem::swap(&mut ret, &mut pending_events);
+ core::mem::swap(&mut ret, &mut pending_events);
ret
}
}
use util::logger::Logger;
use util::ser::{Readable, Writeable};
use util::events::{MessageSendEvent, MessageSendEventsProvider};
+ use util::scid_utils::scid_from_parts;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
#[test]
fn handling_query_channel_range() {
- let (secp_ctx, mut net_graph_msg_handler) = create_net_graph_msg_handler();
+ let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
let chain_hash = genesis_block(Network::Testnet).header.block_hash();
let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
let bitcoin_key_1 = PublicKey::from_secret_key(&secp_ctx, node_1_btckey);
let bitcoin_key_2 = PublicKey::from_secret_key(&secp_ctx, node_2_btckey);
- let scids: Vec<u64> = vec![
- 0x000000_000000_0000, // 0x0x0
- 0x000001_000000_0000, // 1x0x0
- 0x000002_000000_0000, // 2x0x0
- 0x000002_000001_0000, // 2x1x0
- 0x000100_000000_0000, // 256x0x0
- 0x000101_000000_0000, // 257x0x0
- 0xfffffe_ffffff_ffff, // max
- 0xffffff_ffffff_ffff, // never
+ let mut scids: Vec<u64> = vec![
+ scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
+ scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
+ ];
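+ // ("never" sits in block 0xffffff; the handler caps its exclusive end SCID at
+ // scid_from_parts(MAX_SCID_BLOCK, 0, 0), so SCIDs in that block can never be returned.)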
+ // used for testing multipart reply across blocks
+ for block in 100000..=108001 {
+ scids.push(scid_from_parts(block, 0, 0).unwrap());
+ }
+
+ // used for testing resumption on same block
+ scids.push(scid_from_parts(108001, 1, 0).unwrap());
+
for scid in scids {
let unsigned_announcement = UnsignedChannelAnnouncement {
features: ChannelFeatures::known(),
};
}
- // Empty reply when number_of_blocks=0
- test_handling_query_channel_range(
+ // Error when number_of_blocks=0
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
first_blocknum: 0,
number_of_blocks: 0,
},
+ false,
vec![ReplyChannelRange {
chain_hash: chain_hash.clone(),
first_blocknum: 0,
}]
);
- // Empty when wrong chain
- test_handling_query_channel_range(
+ // Error when wrong chain
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
first_blocknum: 0,
number_of_blocks: 0xffff_ffff,
},
+ false,
vec![ReplyChannelRange {
chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
first_blocknum: 0,
}]
);
- // Empty reply when first_blocknum > 0xffffff
- test_handling_query_channel_range(
+ // Error when first_blocknum > 0xffffff
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
chain_hash: chain_hash.clone(),
first_blocknum: 0x01000000,
- number_of_blocks: 0xffffffff,
+ number_of_blocks: 0xffff_ffff,
},
+ false,
vec![ReplyChannelRange {
chain_hash: chain_hash.clone(),
first_blocknum: 0x01000000,
- number_of_blocks: 0xffffffff,
+ number_of_blocks: 0xffff_ffff,
sync_complete: true,
short_channel_ids: vec![]
}]
);
- // Empty reply when max valid SCID block num.
- // Unlike prior test this is a valid but no results are found
- test_handling_query_channel_range(
+ // Empty reply when max valid SCID block num
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
first_blocknum: 0xffffff,
number_of_blocks: 1,
},
+ true,
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0xffffff,
- number_of_blocks: 1,
+ first_blocknum: 0,
+ number_of_blocks: 0x01000000,
sync_complete: true,
short_channel_ids: vec![]
},
);
// No results in valid query range
- test_handling_query_channel_range(
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0x00100000,
+ first_blocknum: 1000,
number_of_blocks: 1000,
},
+ true,
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0x00100000,
- number_of_blocks: 1000,
+ first_blocknum: 0,
+ number_of_blocks: 2000,
sync_complete: true,
short_channel_ids: vec![],
}
]
);
- // Single reply - all blocks
- test_handling_query_channel_range(
+ // Overflow first_blocknum + number_of_blocks
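+ // (end_blocknum() is overflow-safe, saturating at u32::MAX; the handler then caps the end
+ // block at MAX_SCID_BLOCK before SCID conversion)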
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
+ first_blocknum: 0xfe0000,
number_of_blocks: 0xffffffff,
},
+ true,
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
number_of_blocks: 0xffffffff,
sync_complete: true,
short_channel_ids: vec![
- 0x000000_000000_0000, // 0x0x0
- 0x000001_000000_0000, // 1x0x0
- 0x000002_000000_0000, // 2x0x0
- 0x000002_000001_0000, // 2x1x0
- 0x000100_000000_0000, // 256x0x0
- 0x000101_000000_0000, // 257x0x0
0xfffffe_ffffff_ffff, // max
]
}
]
);
- // Single reply - overflow of first_blocknum + number_of_blocks
- test_handling_query_channel_range(
+ // Single block exactly full
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 1,
- number_of_blocks: 0xffffffff,
- },
- vec![
- ReplyChannelRange {
- chain_hash: chain_hash.clone(),
- first_blocknum: 1,
- number_of_blocks: 0xfffffffe,
- sync_complete: true,
- short_channel_ids: vec![
- 0x000001_000000_0000, // 1x0x0
- 0x000002_000000_0000, // 2x0x0
- 0x000002_000001_0000, // 2x1x0
- 0x000100_000000_0000, // 256x0x0
- 0x000101_000000_0000, // 257x0x0
- 0xfffffe_ffffff_ffff, // max
- ]
- }
- ]
- );
-
- // Single reply - query larger than found results
- test_handling_query_channel_range(
- &net_graph_msg_handler,
- &node_id_2,
- QueryChannelRange {
- chain_hash: chain_hash.clone(),
- first_blocknum: 100,
- number_of_blocks: 1000,
- },
- vec![
- ReplyChannelRange {
- chain_hash: chain_hash.clone(),
- first_blocknum: 100,
- number_of_blocks: 1000,
- sync_complete: true,
- short_channel_ids: vec![
- 0x000100_000000_0000, // 256x0x0
- 0x000101_000000_0000, // 257x0x0
- ]
- }
- ]
- );
-
- // Tests below here will chunk replies
- net_graph_msg_handler.max_reply_scids = 1;
-
- // Multipart - new block per messages
- test_handling_query_channel_range(
- &net_graph_msg_handler,
- &node_id_2,
- QueryChannelRange {
- chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 2,
+ first_blocknum: 100000,
+ number_of_blocks: 8000,
},
+ true,
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
first_blocknum: 0,
- number_of_blocks: 1,
- sync_complete: false,
- short_channel_ids: vec![
- 0x000000_000000_0000, // 0x0x0
- ]
- },
- ReplyChannelRange {
- chain_hash: chain_hash.clone(),
- first_blocknum: 1,
- number_of_blocks: 1,
+ number_of_blocks: 108000,
sync_complete: true,
- short_channel_ids: vec![
- 0x000001_000000_0000, // 1x0x0
- ]
+ short_channel_ids: (100000..=107999)
+ .map(|block| scid_from_parts(block, 0, 0).unwrap())
+ .collect(),
},
]
);
- // Multiplart - resumption of same block
- test_handling_query_channel_range(
+ // Multipart split on new block
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 2,
- number_of_blocks: 1,
+ first_blocknum: 100000,
+ number_of_blocks: 8001,
},
+ true,
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 2,
- number_of_blocks: 1,
+ first_blocknum: 0,
+ number_of_blocks: 108000,
sync_complete: false,
- short_channel_ids: vec![
- 0x000002_000000_0000, // 2x0x0
- ]
+ short_channel_ids: (100000..=107999)
+ .map(|block| scid_from_parts(block, 0, 0).unwrap())
+ .collect(),
},
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 2,
- number_of_blocks: 1,
+ first_blocknum: 0,
+ number_of_blocks: 108001,
sync_complete: true,
short_channel_ids: vec![
- 0x000002_000001_0000, // 2x1x0
- ]
+ scid_from_parts(108000, 0, 0).unwrap(),
+ ],
}
]
);
- // Multipart - query larger than found results, similar to single reply
- test_handling_query_channel_range(
+ // Multipart split on same block
+ do_handling_query_channel_range(
&net_graph_msg_handler,
&node_id_2,
QueryChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 100,
- number_of_blocks: 1000,
+ first_blocknum: 100002,
+ number_of_blocks: 8000,
},
+ true,
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 100, // <= query first_blocknum
- number_of_blocks: 157,
+ first_blocknum: 0,
+ number_of_blocks: 108002,
sync_complete: false,
- short_channel_ids: vec![
- 0x000100_000000_0000, // 256x0x0
- ]
+ short_channel_ids: (100002..=108001)
+ .map(|block| scid_from_parts(block, 0, 0).unwrap())
+ .collect(),
},
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 257,
- number_of_blocks: 843, // >= query first_blocknum+number_of_blocks
+ first_blocknum: 0,
+ number_of_blocks: 108002,
sync_complete: true,
short_channel_ids: vec![
- 0x000101_000000_0000, // 257x0x0
- ]
+ scid_from_parts(108001, 1, 0).unwrap(),
+ ],
}
]
);
}
- fn test_handling_query_channel_range(
+ fn do_handling_query_channel_range(
net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
test_node_id: &PublicKey,
msg: QueryChannelRange,
+ expected_ok: bool,
expected_replies: Vec<ReplyChannelRange>
) {
let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
- assert!(result.is_ok());
+
+ if expected_ok {
+ assert!(result.is_ok());
+ } else {
+ assert!(result.is_err());
+ }
let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
assert_eq!(events.len(), expected_replies.len());