done
- name: Upload coverage
if: matrix.coverage
- uses: codecov/codecov-action@v1
+ uses: codecov/codecov-action@v2
with:
# Could you use this to fake the coverage report for your PR? Sure.
# Will anyone be impressed by your amazing coverage? No
force_close_spend_delay: None,
is_outbound: true, is_funding_locked: true,
is_usable: true, is_public: true,
+ balance_msat: 0,
outbound_capacity_msat: 0,
});
}
macro_rules! check_persisted_data {
($node: expr, $filepath: expr, $expected_bytes: expr) => {
- match $node.write(&mut $expected_bytes) {
- Ok(()) => {
- loop {
+ loop {
+ $expected_bytes.clear();
+ match $node.write(&mut $expected_bytes) {
+ Ok(()) => {
match std::fs::read($filepath) {
Ok(bytes) => {
if bytes == $expected_bytes {
},
Err(_) => continue
}
- }
- },
- Err(e) => panic!("Unexpected error: {}", e)
+ },
+ Err(e) => panic!("Unexpected error: {}", e)
+ }
}
}
}
holding_cell_update_fee: Option<u32>,
next_holder_htlc_id: u64,
next_counterparty_htlc_id: u64,
- update_time_counter: u32,
feerate_per_kw: u32,
+ /// The timestamp set on our latest `channel_update` message for this channel. It is updated
+ /// when the channel is updated in ways which may impact the `channel_update` message or when a
+ /// new block is received, ensuring it's always at least moderately close to the current real
+ /// time.
+ update_time_counter: u32,
+
#[cfg(debug_assertions)]
/// Max to_local and to_remote outputs in a locally-generated commitment transaction
holder_max_commitment_tx_output: Mutex<(u64, u64)>,
where F::Target: FeeEstimator
{
let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- if feerate_per_kw < lower_limit {
- return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
+ // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
+ // occasional issues with feerate disagreements between an initiator that wants a feerate
+ // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
+ // sat/kw before the comparison here.
+ if feerate_per_kw + 250 < lower_limit {
+ return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
}
// We only bound the fee updates on the upper side to prevent completely absurd feerates,
// always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
/// corner case properly.
+ /// The channel reserve is subtracted from each balance.
+ /// See also [`Channel::get_balance_msat`]
pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) {
// Note that we have to handle overflow due to the above case.
(
)
}
+ /// Get our total balance in msat.
+ /// This is the amount that would go to us if we close the channel, ignoring any on-chain fees.
+ /// See also [`Channel::get_inbound_outbound_available_balance_msat`]
+ pub fn get_balance_msat(&self) -> u64 {
+ self.value_to_self_msat
+ - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat
+ }
+
pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
(self.holder_selected_channel_reserve_satoshis, self.counterparty_selected_channel_reserve_satoshis)
}
}
pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
+ self.update_time_counter += 1;
self.channel_update_status = status;
}
pub unspendable_punishment_reserve: Option<u64>,
/// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound.
pub user_channel_id: u64,
+ /// Our total balance. This is the amount we would get if we close the channel.
+ /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
+ /// amount is not likely to be recoverable on close.
+ ///
+ /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
+ /// balance is not available for inclusion in new outbound HTLCs). This further does not include
+ /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
+ /// This does not consider any on-chain fees.
+ ///
+ /// See also [`ChannelDetails::outbound_capacity_msat`]
+ pub balance_msat: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
- /// any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not
+ /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// outgoing HTLCs which are awaiting some other resolution to be sent.
///
+ /// See also [`ChannelDetails::balance_msat`]
+ ///
/// This value is not exact. Due to various in-flight changes, feerate changes, and our
/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
/// should be able to spend nearly this amount.
pub outbound_capacity_msat: u64,
/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
- /// include any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not
+ /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new inbound HTLCs).
/// Note that there are some corner cases not fully handled here, so the actual available
/// inbound capacity may be slightly higher than this.
res.reserve(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+ let balance_msat = channel.get_balance_msat();
let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
channel.get_holder_counterparty_selected_channel_reserve_satoshis();
res.push(ChannelDetails {
short_channel_id: channel.get_short_channel_id(),
channel_value_satoshis: channel.get_value_satoshis(),
unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat,
inbound_capacity_msat,
outbound_capacity_msat,
user_channel_id: channel.get_user_id(),
,
// Byte 3
,
+ // Byte 4
+ ,
+ // Byte 5
+ ,
],
optional_features: [
// Byte 0
BasicMPP,
// Byte 3
ShutdownAnySegwit,
+ // Byte 4
+ ,
+ // Byte 5
+ ChannelType,
],
});
define_context!(NodeContext {
// Byte 4
,
// Byte 5
- ,
+ ChannelType,
// Byte 6
Keysend,
],
/// useful for manipulating feature flags.
macro_rules! define_feature {
($odd_bit: expr, $feature: ident, [$($context: ty),+], $doc: expr, $optional_setter: ident,
- $required_setter: ident) => {
+ $required_setter: ident, $supported_getter: ident) => {
#[doc = $doc]
///
/// See [BOLT #9] for details.
<T as $feature>::set_required_bit(&mut self.flags);
self
}
+
+ /// Checks if this feature is supported.
+ pub fn $supported_getter(&self) -> bool {
+ <T as $feature>::supports_feature(&self.flags)
+ }
}
$(
const ASSERT_ODD_BIT_PARITY: usize = (<Self as $feature>::ODD_BIT % 2) - 1;
}
)*
-
+ };
+ ($odd_bit: expr, $feature: ident, [$($context: ty),+], $doc: expr, $optional_setter: ident,
+ $required_setter: ident, $supported_getter: ident, $required_getter: ident) => {
+ define_feature!($odd_bit, $feature, [$($context),+], $doc, $optional_setter, $required_setter, $supported_getter);
+ impl <T: $feature> Features<T> {
+ /// Checks if this feature is required.
+ pub fn $required_getter(&self) -> bool {
+ <T as $feature>::requires_feature(&self.flags)
+ }
+ }
}
}
define_feature!(1, DataLossProtect, [InitContext, NodeContext],
"Feature flags for `option_data_loss_protect`.", set_data_loss_protect_optional,
- set_data_loss_protect_required);
+ set_data_loss_protect_required, supports_data_loss_protect, requires_data_loss_protect);
// NOTE: Per Bolt #9, initial_routing_sync has no even bit.
define_feature!(3, InitialRoutingSync, [InitContext], "Feature flags for `initial_routing_sync`.",
- set_initial_routing_sync_optional, set_initial_routing_sync_required);
+ set_initial_routing_sync_optional, set_initial_routing_sync_required,
+ initial_routing_sync);
define_feature!(5, UpfrontShutdownScript, [InitContext, NodeContext],
"Feature flags for `option_upfront_shutdown_script`.", set_upfront_shutdown_script_optional,
- set_upfront_shutdown_script_required);
+ set_upfront_shutdown_script_required, supports_upfront_shutdown_script,
+ requires_upfront_shutdown_script);
define_feature!(7, GossipQueries, [InitContext, NodeContext],
- "Feature flags for `gossip_queries`.", set_gossip_queries_optional, set_gossip_queries_required);
+ "Feature flags for `gossip_queries`.", set_gossip_queries_optional, set_gossip_queries_required,
+ supports_gossip_queries, requires_gossip_queries);
define_feature!(9, VariableLengthOnion, [InitContext, NodeContext, InvoiceContext],
"Feature flags for `var_onion_optin`.", set_variable_length_onion_optional,
- set_variable_length_onion_required);
+ set_variable_length_onion_required, supports_variable_length_onion,
+ requires_variable_length_onion);
define_feature!(13, StaticRemoteKey, [InitContext, NodeContext, ChannelTypeContext],
"Feature flags for `option_static_remotekey`.", set_static_remote_key_optional,
- set_static_remote_key_required);
+ set_static_remote_key_required, supports_static_remote_key, requires_static_remote_key);
define_feature!(15, PaymentSecret, [InitContext, NodeContext, InvoiceContext],
- "Feature flags for `payment_secret`.", set_payment_secret_optional, set_payment_secret_required);
+ "Feature flags for `payment_secret`.", set_payment_secret_optional, set_payment_secret_required,
+ supports_payment_secret, requires_payment_secret);
define_feature!(17, BasicMPP, [InitContext, NodeContext, InvoiceContext],
- "Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required);
+ "Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required,
+ supports_basic_mpp, requires_basic_mpp);
define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
"Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
- set_shutdown_any_segwit_required);
+ set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
+ define_feature!(45, ChannelType, [InitContext, NodeContext],
+ "Feature flags for `option_channel_type`.", set_channel_type_optional,
+ set_channel_type_required, supports_channel_type, requires_channel_type);
define_feature!(55, Keysend, [NodeContext],
- "Feature flags for keysend payments.", set_keysend_optional, set_keysend_required);
+ "Feature flags for keysend payments.", set_keysend_optional, set_keysend_required,
+ supports_keysend, requires_keysend);
#[cfg(test)]
define_feature!(123456789, UnknownFeature, [NodeContext, ChannelContext, InvoiceContext],
"Feature flags for an unknown feature used in testing.", set_unknown_feature_optional,
- set_unknown_feature_required);
+ set_unknown_feature_required, supports_unknown_test_feature, requires_unknown_test_feature);
}
/// Tracks the set of features which a node implements, templated by the context in which it
}
}
-impl<T: sealed::DataLossProtect> Features<T> {
- #[cfg(test)]
- pub(crate) fn requires_data_loss_protect(&self) -> bool {
- <T as sealed::DataLossProtect>::requires_feature(&self.flags)
- }
- #[cfg(test)]
- pub(crate) fn supports_data_loss_protect(&self) -> bool {
- <T as sealed::DataLossProtect>::supports_feature(&self.flags)
- }
-}
-
impl<T: sealed::UpfrontShutdownScript> Features<T> {
- #[cfg(test)]
- pub(crate) fn requires_upfront_shutdown_script(&self) -> bool {
- <T as sealed::UpfrontShutdownScript>::requires_feature(&self.flags)
- }
- pub(crate) fn supports_upfront_shutdown_script(&self) -> bool {
- <T as sealed::UpfrontShutdownScript>::supports_feature(&self.flags)
- }
#[cfg(test)]
pub(crate) fn clear_upfront_shutdown_script(mut self) -> Self {
<T as sealed::UpfrontShutdownScript>::clear_bits(&mut self.flags);
impl<T: sealed::GossipQueries> Features<T> {
- #[cfg(test)]
- pub(crate) fn requires_gossip_queries(&self) -> bool {
- <T as sealed::GossipQueries>::requires_feature(&self.flags)
- }
- pub(crate) fn supports_gossip_queries(&self) -> bool {
- <T as sealed::GossipQueries>::supports_feature(&self.flags)
- }
#[cfg(test)]
pub(crate) fn clear_gossip_queries(mut self) -> Self {
<T as sealed::GossipQueries>::clear_bits(&mut self.flags);
}
}
-impl<T: sealed::VariableLengthOnion> Features<T> {
- #[cfg(test)]
- pub(crate) fn requires_variable_length_onion(&self) -> bool {
- <T as sealed::VariableLengthOnion>::requires_feature(&self.flags)
- }
- pub(crate) fn supports_variable_length_onion(&self) -> bool {
- <T as sealed::VariableLengthOnion>::supports_feature(&self.flags)
- }
-}
-
-impl<T: sealed::StaticRemoteKey> Features<T> {
- pub(crate) fn supports_static_remote_key(&self) -> bool {
- <T as sealed::StaticRemoteKey>::supports_feature(&self.flags)
- }
- #[cfg(test)]
- pub(crate) fn requires_static_remote_key(&self) -> bool {
- <T as sealed::StaticRemoteKey>::requires_feature(&self.flags)
- }
-}
-
impl<T: sealed::InitialRoutingSync> Features<T> {
- pub(crate) fn initial_routing_sync(&self) -> bool {
- <T as sealed::InitialRoutingSync>::supports_feature(&self.flags)
- }
// We are no longer setting initial_routing_sync now that gossip_queries
// is enabled. This feature is ignored by a peer when gossip_queries has
// been negotiated.
}
}
-impl<T: sealed::PaymentSecret> Features<T> {
- #[cfg(test)]
- pub(crate) fn requires_payment_secret(&self) -> bool {
- <T as sealed::PaymentSecret>::requires_feature(&self.flags)
- }
- /// Returns whether the `payment_secret` feature is supported.
- pub fn supports_payment_secret(&self) -> bool {
- <T as sealed::PaymentSecret>::supports_feature(&self.flags)
- }
-}
-
-impl<T: sealed::BasicMPP> Features<T> {
- #[cfg(test)]
- pub(crate) fn requires_basic_mpp(&self) -> bool {
- <T as sealed::BasicMPP>::requires_feature(&self.flags)
- }
- // We currently never test for this since we don't actually *generate* multipath routes.
- pub(crate) fn supports_basic_mpp(&self) -> bool {
- <T as sealed::BasicMPP>::supports_feature(&self.flags)
- }
-}
-
impl<T: sealed::ShutdownAnySegwit> Features<T> {
- pub(crate) fn supports_shutdown_anysegwit(&self) -> bool {
- <T as sealed::ShutdownAnySegwit>::supports_feature(&self.flags)
- }
#[cfg(test)]
pub(crate) fn clear_shutdown_anysegwit(mut self) -> Self {
<T as sealed::ShutdownAnySegwit>::clear_bits(&mut self.flags);
assert!(!NodeFeatures::known().requires_basic_mpp());
assert!(!InvoiceFeatures::known().requires_basic_mpp());
+ assert!(InitFeatures::known().supports_channel_type());
+ assert!(NodeFeatures::known().supports_channel_type());
+ assert!(!InitFeatures::known().requires_channel_type());
+ assert!(!NodeFeatures::known().requires_channel_type());
+
assert!(InitFeatures::known().supports_shutdown_anysegwit());
assert!(NodeFeatures::known().supports_shutdown_anysegwit());
// - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
// - basic_mpp
// - opt_shutdown_anysegwit
- assert_eq!(node_features.flags.len(), 4);
+ // - (byte 4 contributes no known features)
+ // - option_channel_type
+ assert_eq!(node_features.flags.len(), 6);
assert_eq!(node_features.flags[0], 0b00000010);
assert_eq!(node_features.flags[1], 0b01010001);
assert_eq!(node_features.flags[2], 0b00000010);
assert_eq!(node_features.flags[3], 0b00001000);
+ assert_eq!(node_features.flags[4], 0b00000000);
+ assert_eq!(node_features.flags[5], 0b00100000);
}
// Check that cleared flags are kept blank when converting back:
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
// Disconnect peers
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- let mut chans_disabled: HashSet<u64> = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
+ let mut chans_disabled = HashMap::new();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
// Check that each channel gets updated exactly once
- if !chans_disabled.remove(&msg.contents.short_channel_id) {
+ if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
panic!("Generated ChannelUpdate for wrong chan!");
}
},
nodes[0].node.timer_tick_occurred();
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
- // Check that each channel gets updated exactly once
- if !chans_disabled.remove(&msg.contents.short_channel_id) {
- panic!("Generated ChannelUpdate for wrong chan!");
+ match chans_disabled.remove(&msg.contents.short_channel_id) {
+ // Each update should have a higher timestamp than the previous one, replacing
+ // the old one.
+ Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
+ None => panic!("Generated ChannelUpdate for wrong chan!"),
}
},
_ => panic!("Unexpected event"),
}
}
+ // Check that each channel gets updated exactly once
+ assert!(chans_disabled.is_empty());
}
#[test]
/// The peer did something harmless that we weren't able to meaningfully process.
/// If the error is logged, log it at the given level.
IgnoreAndLog(logger::Level),
+ /// The peer provided us with a gossip message which we'd already seen. In most cases this
+ /// should be ignored, but it may result in the message being forwarded if it is a duplicate of
+ /// our own channel announcements.
+ IgnoreDuplicateGossip,
/// The peer did something incorrect. Tell them.
SendErrorMessage {
/// The message to send.
log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
continue
},
+ msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
msgs::ErrorAction::IgnoreError => {
log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
continue;
},
MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
- if self.message_handler.route_handler.handle_channel_announcement(&msg).is_ok() && self.message_handler.route_handler.handle_channel_update(&update_msg).is_ok() {
- self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None);
- self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None);
+ match self.message_handler.route_handler.handle_channel_announcement(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
+ _ => {},
+ }
+ match self.message_handler.route_handler.handle_channel_update(&update_msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None),
+ _ => {},
}
},
MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler");
- if self.message_handler.route_handler.handle_node_announcement(&msg).is_ok() {
- self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None);
+ match self.message_handler.route_handler.handle_node_announcement(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None),
+ _ => {},
}
},
MessageSendEvent::BroadcastChannelUpdate { msg } => {
log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
- if self.message_handler.route_handler.handle_channel_update(&msg).is_ok() {
- self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None);
+ match self.message_handler.route_handler.handle_channel_update(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
+ _ => {},
}
},
MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
msgs::ErrorAction::IgnoreAndLog(level) => {
log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
},
+ msgs::ErrorAction::IgnoreDuplicateGossip => {},
msgs::ErrorAction::IgnoreError => {
log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
},
None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}),
Some(node) => {
if let Some(node_info) = node.announcement_info.as_ref() {
- if node_info.last_update >= msg.timestamp {
+ // The timestamp field is somewhat of a misnomer - the BOLTs use it to order
+ // updates to ensure you always have the latest one, only vaguely suggesting
+ // that it be at least the current time.
+ if node_info.last_update > msg.timestamp {
return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)});
+ } else if node_info.last_update == msg.timestamp {
+ return Err(LightningError{err: "Update had the same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
}
}
Self::remove_channel_in_nodes(&mut nodes, &entry.get(), msg.short_channel_id);
*entry.get_mut() = chan_info;
} else {
- return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)})
+ return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
}
},
BtreeEntry::Vacant(entry) => {
macro_rules! maybe_update_channel_info {
( $target: expr, $src_node: expr) => {
if let Some(existing_chan_info) = $target.as_ref() {
- if existing_chan_info.last_update >= msg.timestamp {
+ // The timestamp field is somewhat of a misnomer - the BOLTs use it to
+ // order updates to ensure you always have the latest one, only
+ // suggesting that it be at least the current time. For
+ // channel_updates specifically, the BOLTs discuss the possibility of
+ // pruning based on the timestamp field being more than two weeks old,
+ // but only in the non-normative section.
+ if existing_chan_info.last_update > msg.timestamp {
return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip)});
+ } else if existing_chan_info.last_update == msg.timestamp {
+ return Err(LightningError{err: "Update had same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
}
chan_was_enabled = existing_chan_info.enabled;
} else {
match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
Ok(_) => panic!(),
- Err(e) => assert_eq!(e.err, "Update older than last processed update")
+ Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update")
};
unsigned_channel_update.timestamp += 500;
short_channel_id,
channel_value_satoshis: 0,
user_channel_id: 0,
+ balance_msat: 0,
outbound_capacity_msat,
inbound_capacity_msat: 42,
unspendable_punishment_reserve: None,