X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Frouting%2Fnetwork_graph.rs;h=9e4813e9155926fac85b20d307af1bb9ac8a1dc8;hb=2745bd5ac776c48950bcb630338538d31a9615d0;hp=80a7010bd18dd50de0a9fa0854ba74f99f61f707;hpb=4353d4a11c12c148fbf3e95dd2e2f824f60433df;p=rust-lightning

diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs
index 80a7010b..9e4813e9 100644
--- a/lightning/src/routing/network_graph.rs
+++ b/lightning/src/routing/network_graph.rs
@@ -35,9 +35,9 @@ use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 use prelude::*;
 use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
 use core::{cmp, fmt};
-use std::sync::{RwLock, RwLockReadGuard};
+use sync::{RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Mutex;
+use sync::Mutex;
 use core::ops::Deref;
 
 use bitcoin::hashes::hex::ToHex;
@@ -377,23 +377,39 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> wh
 		let mut pending_events = self.pending_events.lock().unwrap();
 		let batch_count = batches.len();
+		let mut prev_batch_endblock = msg.first_blocknum;
 		for (batch_index, batch) in batches.into_iter().enumerate() {
-			// Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and subsequent
-			// must be >= the prior reply. We'll simplify this by using zero since its still spec compliant and
-			// sequence completion is now explicitly.
-			let first_blocknum = 0;
-
-			// Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
-			// query's value. Prior batches must use the number of blocks that fit into the message. We'll
-			// base this off the last SCID in the batch since we've somewhat abusing first_blocknum.
-			let number_of_blocks = if batch_index == batch_count-1 {
-				msg.end_blocknum()
-			} else {
-				block_from_scid(batch.last().unwrap()) + 1
+			// Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
+			// and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
+			//
+			// Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
+			// reply is >= the previous reply's `first_blocknum` and either exactly the previous
+			// reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
+			// significant divergence from the requirements set by the spec, and, in case of blocks
+			// with no channel opens (e.g. empty blocks), requires that we use the previous value
+			// and *not* derive the first_blocknum from the actual first block of the reply.
+			let first_blocknum = prev_batch_endblock;
+
+			// Each message carries the number of blocks (from the `first_blocknum`) its contents
+			// fit in. There is no requirement that we use exactly the number of blocks its
+			// contents are from, except for the bogus requirement c-lightning enforces, above.
+			//
+			// Per spec, the last end block (i.e. `first_blocknum + number_of_blocks`) needs to be
+			// >= the query's end block. Thus, for the last reply, we calculate the difference
+			// between the query's end block and the start of the reply.
+			//
+			// Overflow safe since end_blocknum=msg.first_blocknum+msg.number_of_blocks and
+			// first_blocknum will be either msg.first_blocknum or a higher block height.
+			let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+				(true, msg.end_blocknum() - first_blocknum)
+			}
+			// Prior replies should use the number of blocks that fit into the reply. Overflow
+			// safe since first_blocknum is always <= last SCID's block.
+			else {
+				(false, block_from_scid(batch.last().unwrap()) - first_blocknum)
 			};
 
-			// Only true for the last message in a sequence
-			let sync_complete = batch_index == batch_count - 1;
+			prev_batch_endblock = first_blocknum + number_of_blocks;
 
 			pending_events.push(MessageSendEvent::SendReplyChannelRange {
 				node_id: their_node_id.clone(),
@@ -1072,7 +1088,7 @@ mod tests {
 	use bitcoin::secp256k1::{All, Secp256k1};
 
 	use prelude::*;
-	use std::sync::Arc;
+	use sync::Arc;
 
 	fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>) {
 		let secp_ctx = Secp256k1::new();
@@ -2239,8 +2255,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 0x01000000,
+					first_blocknum: 0xffffff,
+					number_of_blocks: 1,
 					sync_complete: true,
 					short_channel_ids: vec![]
 				},
@@ -2260,8 +2276,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 2000,
+					first_blocknum: 1000,
+					number_of_blocks: 1000,
 					sync_complete: true,
 					short_channel_ids: vec![],
 				}
@@ -2281,8 +2297,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 0xffffffff,
+					first_blocknum: 0xfe0000,
+					number_of_blocks: 0xffffffff - 0xfe0000,
 					sync_complete: true,
 					short_channel_ids: vec![
 						0xfffffe_ffffff_ffff, // max
@@ -2304,8 +2320,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 108000,
+					first_blocknum: 100000,
+					number_of_blocks: 8000,
 					sync_complete: true,
 					short_channel_ids: (100000..=107999)
 						.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2327,8 +2343,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 108000,
+					first_blocknum: 100000,
+					number_of_blocks: 7999,
 					sync_complete: false,
 					short_channel_ids: (100000..=107999)
 						.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2336,8 +2352,8 @@ mod tests {
 				},
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 108001,
+					first_blocknum: 107999,
+					number_of_blocks: 2,
 					sync_complete: true,
 					short_channel_ids: vec![
 						scid_from_parts(108000, 0, 0).unwrap(),
@@ -2359,8 +2375,8 @@ mod tests {
 			vec![
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 108002,
+					first_blocknum: 100002,
+					number_of_blocks: 7999,
 					sync_complete: false,
 					short_channel_ids: (100002..=108001)
 						.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2368,8 +2384,8 @@ mod tests {
 				},
 				ReplyChannelRange {
 					chain_hash: chain_hash.clone(),
-					first_blocknum: 0,
-					number_of_blocks: 108002,
+					first_blocknum: 108001,
+					number_of_blocks: 1,
 					sync_complete: true,
 					short_channel_ids: vec![
 						scid_from_parts(108001, 1, 0).unwrap(),
@@ -2386,6 +2402,9 @@ mod tests {
 		expected_ok: bool,
 		expected_replies: Vec<ReplyChannelRange>
 	) {
+		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+		let query_end_blocknum = msg.end_blocknum();
 		let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
 
 		if expected_ok {
@@ -2407,6 +2426,17 @@ mod tests {
 					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
 					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
 					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+					// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+					assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+					assert!(msg.first_blocknum >= max_firstblocknum);
+					max_firstblocknum = msg.first_blocknum;
+					c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+					// Check that the last reply's end block is >= the query's end_blocknum
+					if i == events.len() - 1 {
+						assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+					}
 				},
 				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
 			}
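
For reference, a minimal, self-contained Rust sketch of the reply-sequencing scheme the new handler code implements. `Query`, `Reply`, `build_replies`, and the local `block_from_scid` are simplified stand-ins for illustration, not the actual `lightning::ln::msgs` types or the `util::scid_utils` helper:

	/// Block height occupies the upper 3 bytes of a short channel id.
	fn block_from_scid(scid: u64) -> u32 {
		(scid >> 40) as u32
	}

	struct Query { first_blocknum: u32, number_of_blocks: u32 }

	impl Query {
		/// One past the last block covered by the query (saturating, as in
		/// `QueryChannelRange::end_blocknum`).
		fn end_blocknum(&self) -> u32 {
			self.first_blocknum.saturating_add(self.number_of_blocks)
		}
	}

	struct Reply { first_blocknum: u32, number_of_blocks: u32, sync_complete: bool, short_channel_ids: Vec<u64> }

	/// Builds one reply per batch of SCIDs; batches are assumed non-empty,
	/// sorted, and within the queried range.
	fn build_replies(msg: &Query, batches: Vec<Vec<u64>>) -> Vec<Reply> {
		let batch_count = batches.len();
		let mut prev_batch_endblock = msg.first_blocknum;
		let mut replies = Vec::with_capacity(batch_count);
		for (batch_index, batch) in batches.into_iter().enumerate() {
			// Start where the previous reply ended, so c-lightning < 0.10,
			// which requires first_blocknum to chain exactly, accepts it.
			let first_blocknum = prev_batch_endblock;
			let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
				// The final reply must cover at least through the query's end block.
				(true, msg.end_blocknum() - first_blocknum)
			} else {
				// Earlier replies end at the block of their last SCID; the next
				// reply then starts at that same block.
				(false, block_from_scid(*batch.last().unwrap()) - first_blocknum)
			};
			prev_batch_endblock = first_blocknum + number_of_blocks;
			replies.push(Reply { first_blocknum, number_of_blocks, sync_complete, short_channel_ids: batch });
		}
		replies
	}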
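And a sketch of the invariants the updated test enforces on such a sequence, using the same hypothetical `Query`/`Reply` types; the `sync_complete` assertion reflects the handler behavior rather than the test's expected-reply comparison:

	/// Mirrors the checks in `do_handling_query_channel_range`: starts are
	/// monotonic, each reply starts at the previous end block (or one past it,
	/// the two cases c-lightning v0.9.x accepts), and the final reply reaches
	/// the query's end block.
	fn check_reply_sequence(query: &Query, replies: &[Reply]) {
		let mut max_firstblocknum = query.first_blocknum.saturating_sub(1);
		let mut prev_end_blocknum = max_firstblocknum;
		for (i, reply) in replies.iter().enumerate() {
			assert!(reply.first_blocknum == prev_end_blocknum
				|| reply.first_blocknum == prev_end_blocknum.saturating_add(1));
			assert!(reply.first_blocknum >= max_firstblocknum);
			max_firstblocknum = reply.first_blocknum;
			prev_end_blocknum = reply.first_blocknum.saturating_add(reply.number_of_blocks);
			// Only the last reply signals completion, and it must cover the query.
			assert_eq!(reply.sync_complete, i == replies.len() - 1);
			if i == replies.len() - 1 {
				assert!(prev_end_blocknum >= query.end_blocknum());
			}
		}
	}

Chaining the two sketches, `check_reply_sequence(&query, &build_replies(&query, batches))` passes, while the old scheme (every reply starting at block zero) trips the first assertion, which is exactly the c-lightning < 0.10 interop failure this change addresses.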