use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
use util::events::{MessageSendEvent, MessageSendEventsProvider};
use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
use prelude::*;
use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
use core::{cmp, fmt};
-use std::sync::{RwLock, RwLockReadGuard};
+use sync::{RwLock, RwLockReadGuard};
use core::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Mutex;
+use sync::Mutex;
use core::ops::Deref;
use bitcoin::hashes::hex::ToHex;
fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) {
match update {
&msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => {
+ let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
+ log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" });
let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx);
},
&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => {
+ log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id);
self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent);
},
&msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => {
+ log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id);
self.network_graph.write().unwrap().fail_node(node_id, is_permanent);
},
}
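For context, the `chan_enabled` computation added above reads bit 1 of the `channel_update` flags byte, which BOLT 7 defines as the `disable` bit. A minimal standalone sketch of the same check (the helper name is ours, not LDK's):

```rust
/// True when the BOLT 7 `disable` bit (bit 1 of the flags byte) is clear.
fn is_channel_enabled(flags: u8) -> bool {
    flags & (1 << 1) != (1 << 1)
}

#[test]
fn disable_bit_examples() {
    assert!(is_channel_enabled(0b01));  // direction bit only: enabled
    assert!(!is_channel_enabled(0b10)); // disable bit set: disabled
}
```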
let mut pending_events = self.pending_events.lock().unwrap();
let batch_count = batches.len();
+ let mut prev_batch_endblock = msg.first_blocknum;
for (batch_index, batch) in batches.into_iter().enumerate() {
- // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and subsequent
- // must be >= the prior reply. We'll simplify this by using zero since its still spec compliant and
- // sequence completion is now explicitly.
- let first_blocknum = 0;
-
- // Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
- // query's value. Prior batches must use the number of blocks that fit into the message. We'll
- // base this off the last SCID in the batch since we've somewhat abusing first_blocknum.
- let number_of_blocks = if batch_index == batch_count-1 {
- msg.end_blocknum()
- } else {
- block_from_scid(batch.last().unwrap()) + 1
+ // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
+ // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
+ //
+ // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
+ // reply is >= the previous reply's `first_blocknum` and either exactly the previous
+ // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
+ // significant diversion from the requirements set by the spec, and, in case of blocks
+ // with no channel opens (e.g. empty blocks), requires that we use the previous value
+ // and *not* derive the first_blocknum from the actual first block of the reply.
+ let first_blocknum = prev_batch_endblock;
+
+ // Each message carries the number of blocks (from the `first_blocknum`) its contents
+ // fit in. There is no requirement that we use exactly the number of blocks its
+ // contents span, except for the bogus requirements c-lightning enforces, noted above.
+ //
+ // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
+ // >= the query's end block. Thus, for the last reply, we calculate the difference
+ // between the query's end block and the start of the reply.
+ //
+ // Overflow safe since end_blocknum=msg.first_blocknum+msg.number_of_blocks and
+ // first_blocknum will be either msg.first_blocknum or a higher block height.
+ let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+ (true, msg.end_blocknum() - first_blocknum)
+ }
+ // Prior replies should use the number of blocks that fit into the reply. Overflow
+ // safe since first_blocknum is always <= last SCID's block.
+ else {
+ (false, block_from_scid(batch.last().unwrap()) - first_blocknum)
};
- // Only true for the last message in a sequence
- let sync_complete = batch_index == batch_count - 1;
+ prev_batch_endblock = first_blocknum + number_of_blocks;
pending_events.push(MessageSendEvent::SendReplyChannelRange {
node_id: their_node_id.clone(),
}
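The block-range bookkeeping above is easier to follow with the state pulled out. A hedged standalone sketch of the same computation, where `reply_ranges` and `block_of` are illustrative stand-ins (for the handler's loop and `block_from_scid` respectively), not LDK API:

```rust
/// Sketch: compute (first_blocknum, number_of_blocks, sync_complete) per batch,
/// mirroring the loop above. `block_of` stands in for `block_from_scid`.
fn reply_ranges(
    query_first: u32, query_end: u32, batches: &[Vec<u64>],
    block_of: impl Fn(u64) -> u32,
) -> Vec<(u32, u32, bool)> {
    let mut prev_end = query_first;
    let count = batches.len();
    batches.iter().enumerate().map(|(i, batch)| {
        // Start exactly where the previous reply ended, never re-derived from
        // the batch contents, to satisfy the c-lightning < 0.10 constraint.
        let first = prev_end;
        let (complete, blocks) = if i == count - 1 {
            (true, query_end - first) // final reply covers through the query's end
        } else {
            (false, block_of(*batch.last().unwrap()) - first)
        };
        prev_end = first + blocks;
        (first, blocks, complete)
    }).collect()
}
```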
impl_writeable_tlv_based!(DirectionalChannelInfo, {
- (0, last_update),
- (2, enabled),
- (4, cltv_expiry_delta),
- (6, htlc_minimum_msat),
- (8, htlc_maximum_msat),
- (10, fees),
- (12, last_update_message),
-}, {}, {});
+ (0, last_update, required),
+ (2, enabled, required),
+ (4, cltv_expiry_delta, required),
+ (6, htlc_minimum_msat, required),
+ (8, htlc_maximum_msat, required),
+ (10, fees, required),
+ (12, last_update_message, required),
+});
#[derive(Clone, Debug, PartialEq)]
/// Details about a channel (both directions).
}
impl_writeable_tlv_based!(ChannelInfo, {
- (0, features),
- (2, node_one),
- (4, one_to_two),
- (6, node_two),
- (8, two_to_one),
- (10, capacity_sats),
- (12, announcement_message),
-}, {}, {});
+ (0, features, required),
+ (2, node_one, required),
+ (4, one_to_two, required),
+ (6, node_two, required),
+ (8, two_to_one, required),
+ (10, capacity_sats, required),
+ (12, announcement_message, required),
+});
/// Fees for routing via a given channel or a node
pub proportional_millionths: u32,
}
-impl_writeable_tlv_based!(RoutingFees, {(0, base_msat), (2, proportional_millionths)}, {}, {});
+impl_writeable_tlv_based!(RoutingFees, {
+ (0, base_msat, required),
+ (2, proportional_millionths, required)
+});
#[derive(Clone, Debug, PartialEq)]
/// Information received in the latest node_announcement from this node.
}
impl_writeable_tlv_based!(NodeAnnouncementInfo, {
- (0, features),
- (2, last_update),
- (4, rgb),
- (6, alias),
-}, {
- (8, announcement_message),
-}, {
- (10, addresses),
+ (0, features, required),
+ (2, last_update, required),
+ (4, rgb, required),
+ (6, alias, required),
+ (8, announcement_message, option),
+ (10, addresses, vec_type),
});
#[derive(Clone, Debug, PartialEq)]
}
}
-impl_writeable_tlv_based!(NodeInfo, {}, {
- (0, lowest_inbound_channel_fees),
- (2, announcement_info),
-}, {
- (4, channels),
+impl_writeable_tlv_based!(NodeInfo, {
+ (0, lowest_inbound_channel_fees, option),
+ (2, announcement_info, option),
+ (4, channels, vec_type),
});
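The TLV macro hunks above all reflect the same interface change: `impl_writeable_tlv_based!` now takes a single field list with a per-field marker instead of three positional lists. An illustrative invocation on a made-up struct (`ExampleInfo` is hypothetical; the markers are the ones used in the hunks above):

```rust
struct ExampleInfo {
    fee_base_msat: u32,      // always serialized: `required`
    alias: Option<[u8; 32]>, // may be absent: `option`
    channels: Vec<u64>,      // variable-length collection: `vec_type`
}
impl_writeable_tlv_based!(ExampleInfo, {
    (0, fee_base_msat, required),
    (2, alias, option),
    (4, channels, vec_type),
});
```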
const SERIALIZATION_VERSION: u8 = 1;
node_info.write(writer)?;
}
- write_tlv_fields!(writer, {}, {});
+ write_tlv_fields!(writer, {});
Ok(())
}
}
let node_info = Readable::read(reader)?;
nodes.insert(node_id, node_info);
}
- read_tlv_fields!(reader, {}, {});
+ read_tlv_fields!(reader, {});
Ok(NetworkGraph {
genesis_hash,
Some(node) => {
if let Some(node_info) = node.announcement_info.as_ref() {
if node_info.last_update >= msg.timestamp {
- return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+ return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
}
}
Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id);
*entry.get_mut() = chan_info;
} else {
- return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError})
+ return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)})
}
},
BtreeEntry::Vacant(entry) => {
( $target: expr, $src_node: expr) => {
if let Some(existing_chan_info) = $target.as_ref() {
if existing_chan_info.last_update >= msg.timestamp {
- return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+ return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
}
chan_was_enabled = existing_chan_info.enabled;
} else {
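These hunks swap the silent `ErrorAction::IgnoreError` for `ErrorAction::IgnoreAndLog(Level::Trace)`, letting the caller surface the dropped message at the handler's chosen level. A hedged sketch of what a consuming call site might do (the surrounding handler and `logger` binding are assumptions, not LDK's actual call site):

```rust
match err.action {
    // New: ignore the message but log it at the level the handler chose.
    msgs::ErrorAction::IgnoreAndLog(level) => {
        log_given_level!(logger, level, "Ignoring error from peer: {}", err.err);
    },
    // Old behavior: dropped with no trace in the logs.
    msgs::ErrorAction::IgnoreError => {},
    _ => { /* disconnect or reply with an error message, as before */ },
}
```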
use bitcoin::secp256k1::{All, Secp256k1};
use prelude::*;
- use std::sync::Arc;
+ use sync::Arc;
fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>) {
let secp_ctx = Secp256k1::new();
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 0x01000000,
+ first_blocknum: 0xffffff,
+ number_of_blocks: 1,
sync_complete: true,
short_channel_ids: vec![]
},
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 2000,
+ first_blocknum: 1000,
+ number_of_blocks: 1000,
sync_complete: true,
short_channel_ids: vec![],
}
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 0xffffffff,
+ first_blocknum: 0xfe0000,
+ number_of_blocks: 0xffffffff - 0xfe0000,
sync_complete: true,
short_channel_ids: vec![
0xfffffe_ffffff_ffff, // max
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108000,
+ first_blocknum: 100000,
+ number_of_blocks: 8000,
sync_complete: true,
short_channel_ids: (100000..=107999)
.map(|block| scid_from_parts(block, 0, 0).unwrap())
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108000,
+ first_blocknum: 100000,
+ number_of_blocks: 7999,
sync_complete: false,
short_channel_ids: (100000..=107999)
.map(|block| scid_from_parts(block, 0, 0).unwrap())
},
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108001,
+ first_blocknum: 107999,
+ number_of_blocks: 2,
sync_complete: true,
short_channel_ids: vec![
scid_from_parts(108000, 0, 0).unwrap(),
vec![
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108002,
+ first_blocknum: 100002,
+ number_of_blocks: 7999,
sync_complete: false,
short_channel_ids: (100002..=108001)
.map(|block| scid_from_parts(block, 0, 0).unwrap())
},
ReplyChannelRange {
chain_hash: chain_hash.clone(),
- first_blocknum: 0,
- number_of_blocks: 108002,
+ first_blocknum: 108001,
+ number_of_blocks: 1,
sync_complete: true,
short_channel_ids: vec![
scid_from_parts(108001, 1, 0).unwrap(),
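The updated expectations across these test vectors all satisfy the c-lightning sequencing invariant described earlier: each reply starts exactly where the previous one ended. A quick check over the two multi-reply cases above:

```rust
#[test]
fn replies_are_contiguous() {
    // (first_blocknum, number_of_blocks) pairs from the expected replies above.
    let sequences = [
        [(100000u32, 7999u32), (107999, 2)],
        [(100002, 7999), (108001, 1)],
    ];
    for replies in &sequences {
        for pair in replies.windows(2) {
            // Each reply must begin exactly where the previous one ended.
            assert_eq!(pair[1].0, pair[0].0 + pair[0].1);
        }
    }
}
```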
expected_ok: bool,
expected_replies: Vec<ReplyChannelRange>
) {
+ let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+ let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+ let query_end_blocknum = msg.end_blocknum();
let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
if expected_ok {
assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
assert_eq!(msg.sync_complete, expected_reply.sync_complete);
assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+ // Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+ assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+ assert!(msg.first_blocknum >= max_firstblocknum);
+ max_firstblocknum = msg.first_blocknum;
+ c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+ // Check that the last reply's end block (first_blocknum + number_of_blocks) is >= the query's end_blocknum
+ if i == events.len() - 1 {
+ assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+ }
},
_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
}