From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com> Date: Thu, 8 Jul 2021 14:51:47 +0000 (+0000) Subject: Merge pull request #961 from TheBlueMatt/2021-06-workaround-broken-cln X-Git-Tag: v0.0.99~3 X-Git-Url: http://git.bitcoin.ninja/index.cgi?p=rust-lightning;a=commitdiff_plain;h=96a738aa5379d5993a7881203c5a643cff3e7933;hp=-c Merge pull request #961 from TheBlueMatt/2021-06-workaround-broken-cln Use the query start block for ReplyChannelRange response messages --- 96a738aa5379d5993a7881203c5a643cff3e7933 diff --combined lightning/src/routing/network_graph.rs index 80a7010b,3cbcd912..22cd2a92 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@@ -28,7 -28,7 +28,7 @@@ use ln::msgs::{ChannelAnnouncement, Cha use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; -use util::logger::Logger; +use util::logger::{Logger, Level}; use util::events::{MessageSendEvent, MessageSendEventsProvider}; use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; @@@ -169,16 -169,12 +169,16 @@@ impl RoutingMessa fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) { match update { &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => { + let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1); + log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" }); let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx); }, &msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => { + log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id); self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent); }, &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => { + log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id); self.network_graph.write().unwrap().fail_node(node_id, is_permanent); }, } @@@ -377,23 -373,39 +377,39 @@@ let mut pending_events = self.pending_events.lock().unwrap(); let batch_count = batches.len(); + let mut prev_batch_endblock = msg.first_blocknum; for (batch_index, batch) in batches.into_iter().enumerate() { - // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and subsequent - // must be >= the prior reply. We'll simplify this by using zero since its still spec compliant and - // sequence completion is now explicitly. - let first_blocknum = 0; - - // Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the - // query's value. Prior batches must use the number of blocks that fit into the message. We'll - // base this off the last SCID in the batch since we've somewhat abusing first_blocknum. - let number_of_blocks = if batch_index == batch_count-1 { - msg.end_blocknum() - } else { - block_from_scid(batch.last().unwrap()) + 1 + // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum` + // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`. 
+ // + // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each + // reply is >= the previous reply's `first_blocknum` and either exactly the previous + // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a + // significant diversion from the requirements set by the spec, and, in case of blocks + // with no channel opens (e.g. empty blocks), requires that we use the previous value + // and *not* derive the first_blocknum from the actual first block of the reply. + let first_blocknum = prev_batch_endblock; + + // Each message carries the number of blocks (from the `first_blocknum`) its contents + // fit in. Though there is no requirement that we use exactly the number of blocks its + // contents are from, except for the bogus requirements c-lightning enforces, above. + // + // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be + // >= the query's end block. Thus, for the last reply, we calculate the difference + // between the query's end block and the start of the reply. + // + // Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and + // first_blocknum will be either msg.first_blocknum or a higher block height. + let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 { + (true, msg.end_blocknum() - first_blocknum) + } + // Prior replies should use the number of blocks that fit into the reply. Overflow + // safe since first_blocknum is always <= last SCID's block. + else { + (false, block_from_scid(batch.last().unwrap()) - first_blocknum) }; - // Only true for the last message in a sequence - let sync_complete = batch_index == batch_count - 1; + prev_batch_endblock = first_blocknum + number_of_blocks; pending_events.push(MessageSendEvent::SendReplyChannelRange { node_id: their_node_id.clone(), @@@ -464,14 -476,14 +480,14 @@@ impl fmt::Display for DirectionalChanne } impl_writeable_tlv_based!(DirectionalChannelInfo, { - (0, last_update), - (2, enabled), - (4, cltv_expiry_delta), - (6, htlc_minimum_msat), - (8, htlc_maximum_msat), - (10, fees), - (12, last_update_message), -}, {}, {}); + (0, last_update, required), + (2, enabled, required), + (4, cltv_expiry_delta, required), + (6, htlc_minimum_msat, required), + (8, htlc_maximum_msat, required), + (10, fees, required), + (12, last_update_message, required), +}); #[derive(Clone, Debug, PartialEq)] /// Details about a channel (both directions). @@@ -505,14 -517,14 +521,14 @@@ impl fmt::Display for ChannelInfo } impl_writeable_tlv_based!(ChannelInfo, { - (0, features), - (2, node_one), - (4, one_to_two), - (6, node_two), - (8, two_to_one), - (10, capacity_sats), - (12, announcement_message), -}, {}, {}); + (0, features, required), + (2, node_one, required), + (4, one_to_two, required), + (6, node_two, required), + (8, two_to_one, required), + (10, capacity_sats, required), + (12, announcement_message, required), +}); /// Fees for routing via a given channel or a node @@@ -525,10 -537,7 +541,10 @@@ pub struct RoutingFees pub proportional_millionths: u32, } -impl_writeable_tlv_based!(RoutingFees, {(0, base_msat), (2, proportional_millionths)}, {}, {}); +impl_writeable_tlv_based!(RoutingFees, { + (0, base_msat, required), + (2, proportional_millionths, required) +}); #[derive(Clone, Debug, PartialEq)] /// Information received in the latest node_announcement from this node. 
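For context on the handle_query_channel_range hunk above: the batching arithmetic can be illustrated with a small standalone Rust sketch. The function name plan_replies and its shape are hypothetical (this is not rust-lightning's API); it assumes each batch is summarized by the block height of its last SCID, and that the query's end block is never below any computed first_blocknum, matching the overflow-safety notes in the comments above.

// Hypothetical helper: returns one (first_blocknum, number_of_blocks,
// sync_complete) tuple per reply, mirroring the batching hunk above.
fn plan_replies(query_first: u32, query_end: u32, batch_last_scid_blocks: &[u32]) -> Vec<(u32, u32, bool)> {
	let batch_count = batch_last_scid_blocks.len();
	// Carry the previous reply's end block forward: c-lightning < 0.10
	// requires each first_blocknum to equal the prior reply's
	// first_blocknum + number_of_blocks, or exceed it by exactly one,
	// even across blocks with no channel opens.
	let mut prev_batch_endblock = query_first;
	let mut replies = Vec::with_capacity(batch_count);
	for (batch_index, &last_scid_block) in batch_last_scid_blocks.iter().enumerate() {
		let first_blocknum = prev_batch_endblock;
		let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
			// The final reply must cover through the query's end block.
			(true, query_end - first_blocknum)
		} else {
			// Earlier replies run up to the block holding the batch's
			// last SCID; the next reply's range starts there.
			(false, last_scid_block - first_blocknum)
		};
		prev_batch_endblock = first_blocknum + number_of_blocks;
		replies.push((first_blocknum, number_of_blocks, sync_complete));
	}
	replies
}

fn main() {
	// Mirrors the last two-batch test vector below: a query with
	// first_blocknum 100002 and end_blocknum() of 108002, where both
	// batches end with an SCID in block 108001.
	let replies = plan_replies(100002, 108002, &[108001, 108001]);
	assert_eq!(replies, vec![(100002, 7999, false), (108001, 1, true)]);
}

Note how the second reply's range starts exactly at the first reply's end block, which is precisely what the affected c-lightning versions insist on.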
@@@ -554,12 -563,14 +570,12 @@@ pub struct NodeAnnouncementInfo } impl_writeable_tlv_based!(NodeAnnouncementInfo, { - (0, features), - (2, last_update), - (4, rgb), - (6, alias), -}, { - (8, announcement_message), -}, { - (10, addresses), + (0, features, required), + (2, last_update, required), + (4, rgb, required), + (6, alias, required), + (8, announcement_message, option), + (10, addresses, vec_type), }); #[derive(Clone, Debug, PartialEq)] @@@ -585,10 -596,11 +601,10 @@@ impl fmt::Display for NodeInfo } } -impl_writeable_tlv_based!(NodeInfo, {}, { - (0, lowest_inbound_channel_fees), - (2, announcement_info), -}, { - (4, channels), +impl_writeable_tlv_based!(NodeInfo, { + (0, lowest_inbound_channel_fees, option), + (2, announcement_info, option), + (4, channels, vec_type), }); const SERIALIZATION_VERSION: u8 = 1; @@@ -610,7 -622,7 +626,7 @@@ impl Writeable for NetworkGraph node_info.write(writer)?; } - write_tlv_fields!(writer, {}, {}); + write_tlv_fields!(writer, {}); Ok(()) } } @@@ -634,7 -646,7 +650,7 @@@ impl Readable for NetworkGraph let node_info = Readable::read(reader)?; nodes.insert(node_id, node_info); } - read_tlv_fields!(reader, {}, {}); + read_tlv_fields!(reader, {}); Ok(NetworkGraph { genesis_hash, @@@ -717,7 -729,7 +733,7 @@@ impl NetworkGraph Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { if node_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); } } @@@ -838,7 -850,7 +854,7 @@@ Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id); *entry.get_mut() = chan_info; } else { - return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError}) + return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}) } }, BtreeEntry::Vacant(entry) => { @@@ -940,7 -952,7 +956,7 @@@ ( $target: expr, $src_node: expr) => { if let Some(existing_chan_info) = $target.as_ref() { if existing_chan_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); } chan_was_enabled = existing_chan_info.enabled; } else { @@@ -2239,8 -2251,8 +2255,8 @@@ mod tests vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 0x01000000, + first_blocknum: 0xffffff, + number_of_blocks: 1, sync_complete: true, short_channel_ids: vec![] }, @@@ -2260,8 -2272,8 +2276,8 @@@ vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 2000, + first_blocknum: 1000, + number_of_blocks: 1000, sync_complete: true, short_channel_ids: vec![], } @@@ -2281,8 -2293,8 +2297,8 @@@ vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 0xffffffff, + first_blocknum: 0xfe0000, + number_of_blocks: 0xffffffff - 0xfe0000, sync_complete: true, short_channel_ids: vec![ 0xfffffe_ffffff_ffff, // max @@@ -2304,8 -2316,8 +2320,8 @@@ vec![ ReplyChannelRange { chain_hash: chain_hash.clone(), - first_blocknum: 0, - number_of_blocks: 108000, + first_blocknum: 100000, + 
number_of_blocks: 8000,
sync_complete: true,
short_channel_ids: (100000..=107999)
	.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@@ -2327,8 -2339,8 +2343,8 @@@
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108000,
+ first_blocknum: 100000,
+ number_of_blocks: 7999,
sync_complete: false,
short_channel_ids: (100000..=107999)
	.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@@ -2336,8 -2348,8 +2352,8 @@@
},
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108001,
+ first_blocknum: 107999,
+ number_of_blocks: 2,
sync_complete: true,
short_channel_ids: vec![
scid_from_parts(108000, 0, 0).unwrap(),
@@@ -2359,8 -2371,8 +2375,8 @@@
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108002,
+ first_blocknum: 100002,
+ number_of_blocks: 7999,
sync_complete: false,
short_channel_ids: (100002..=108001)
	.map(|block| scid_from_parts(block, 0, 0).unwrap())
@@@ -2368,8 -2380,8 +2384,8 @@@
},
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108002,
+ first_blocknum: 108001,
+ number_of_blocks: 1,
sync_complete: true,
short_channel_ids: vec![
scid_from_parts(108001, 1, 0).unwrap(),
@@@ -2386,6 -2398,9 +2402,9 @@@
expected_ok: bool,
expected_replies: Vec<ReplyChannelRange>
) {
+ let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+ let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+ let query_end_blocknum = msg.end_blocknum();
let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
if expected_ok {
@@@ -2407,6 -2422,17 +2426,17 @@@
assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
assert_eq!(msg.sync_complete, expected_reply.sync_complete);
assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+ // Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+ assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+ assert!(msg.first_blocknum >= max_firstblocknum);
+ max_firstblocknum = msg.first_blocknum;
+ c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+ // Check that the last reply's block range reaches the query's end_blocknum
+ if i == events.len() - 1 {
+ assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+ }
},
_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
}
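The sequencing assertions added to the test harness above can also be read as a standalone predicate. A minimal sketch, assuming the same saturating u32 arithmetic; cln_accepts is a hypothetical name (not part of rust-lightning or c-lightning), and the replies slice holds (first_blocknum, number_of_blocks) pairs in send order:

// Returns whether a reply sequence satisfies the constraints enforced by
// c-lightning v0.9.3, as exercised by the test asserts above.
fn cln_accepts(query_first: u32, query_end: u32, replies: &[(u32, u32)]) -> bool {
	let mut max_first = query_first.saturating_sub(1);
	let mut prev_end = max_first;
	for (i, &(first, num_blocks)) in replies.iter().enumerate() {
		// first_blocknum must equal the prior reply's end block or exceed
		// it by exactly one, and must never decrease.
		if first != prev_end && first != prev_end.saturating_add(1) {
			return false;
		}
		if first < max_first {
			return false;
		}
		max_first = first;
		prev_end = first.saturating_add(num_blocks);
		// The final reply must reach the query's end block.
		if i == replies.len() - 1 && prev_end < query_end {
			return false;
		}
	}
	true
}

fn main() {
	// The post-patch two-reply sequence from the test above passes...
	assert!(cln_accepts(100002, 108002, &[(100002, 7999), (108001, 1)]));
	// ...while replies that all start at block 0, as rust-lightning sent
	// before this change, are rejected at the very first reply.
	assert!(!cln_accepts(100002, 108002, &[(0, 108001), (0, 108002)]));
}

This is why the patch carries the previous reply's end block forward instead of starting each reply at zero or at the block of its first SCID: either alternative breaks the equality check on the affected c-lightning versions.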