From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com> Date: Fri, 7 Jun 2024 19:25:46 +0000 (-0700) Subject: Merge pull request #3083 from valentinewallace/2024-05-ignore-onion-err-chan-upd X-Git-Tag: v0.0.124-beta~92 X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=8ca3259bfa90948b8ce73aedc664d31a6bc6b0e1;hp=-c;p=rust-lightning Merge pull request #3083 from valentinewallace/2024-05-ignore-onion-err-chan-upd Ignore channel updates in onion errors --- 8ca3259bfa90948b8ce73aedc664d31a6bc6b0e1 diff --combined lightning/src/ln/functional_test_utils.rs index 11e0737da,440ae11de..7e57f2059 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@@ -35,17 -35,15 +35,17 @@@ use crate::util::test_utils use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface}; use crate::util::ser::{ReadableArgs, Writeable}; +use bitcoin::amount::Amount; use bitcoin::blockdata::block::{Block, Header, Version}; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut}; use bitcoin::hash_types::{BlockHash, TxMerkleNode}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash as _; -use bitcoin::network::constants::Network; +use bitcoin::network::Network; use bitcoin::pow::CompactTarget; use bitcoin::secp256k1::{PublicKey, SecretKey}; +use bitcoin::transaction; use alloc::rc::Rc; use core::cell::RefCell; @@@ -97,7 -95,7 +97,7 @@@ pub fn mine_transaction_without_consist txdata: Vec::new(), }; for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count - block.txdata.push(Transaction { version: 0, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() }); + block.txdata.push(Transaction { version: transaction::Version(0), lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() }); } block.txdata.push((*tx).clone()); do_connect_block_without_consistency_checks(node, block, false); @@@ -115,7 -113,7 +115,7 @@@ pub fn confirm_transactions_at<'a, 'b, } let mut txdata = Vec::new(); for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count - txdata.push(Transaction { version: 0, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() }); + txdata.push(Transaction { version: transaction::Version(0), lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() }); } for tx in txn { txdata.push((*tx).clone()); @@@ -1157,8 -1155,8 +1157,8 @@@ fn internal_create_funding_transaction< Vec::new() }; - let tx = Transaction { version: chan_id as i32, lock_time: LockTime::ZERO, input, output: vec![TxOut { - value: *channel_value_satoshis, script_pubkey: output_script.clone(), + let tx = Transaction { version: transaction::Version(chan_id as i32), lock_time: LockTime::ZERO, input, output: vec![TxOut { + value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 }; (*temporary_channel_id, tx, funding_outpoint) @@@ -1478,15 -1476,15 +1478,15 @@@ pub fn update_nodes_with_chan_announce< pub fn do_check_spends Option>(tx: &Transaction, get_output: F) { for outp in tx.output.iter() { - assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Spending tx output didn't meet dust limit"); + assert!(outp.value >= outp.script_pubkey.dust_value(), "Spending tx output didn't meet dust 
limit"); } let mut total_value_in = 0; for input in tx.input.iter() { - total_value_in += get_output(&input.previous_output).unwrap().value; + total_value_in += get_output(&input.previous_output).unwrap().value.to_sat(); } let mut total_value_out = 0; for output in tx.output.iter() { - total_value_out += output.value; + total_value_out += output.value.to_sat(); } let min_fee = (tx.weight().to_wu() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up) // Input amount - output amount = fee, so check that out + min_fee is smaller than input @@@ -1500,7 -1498,7 +1500,7 @@@ macro_rules! check_spends { $( for outp in $spends_txn.output.iter() { - assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Input tx output didn't meet dust limit"); + assert!(outp.value >= outp.script_pubkey.dust_value(), "Input tx output didn't meet dust limit"); } )* let get_output = |out_point: &bitcoin::blockdata::transaction::OutPoint| { @@@ -2067,7 -2065,7 +2067,7 @@@ macro_rules! get_payment_preimage_hash /// Gets a route from the given sender to the node described in `payment_params`. pub fn get_route(send_node: &Node, route_params: &RouteParameters) -> Result { let scorer = TestScorer::new(); - let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet); + let keys_manager = TestKeysInterface::new(&[0u8; 32], Network::Testnet); let random_seed_bytes = keys_manager.get_secure_random_bytes(); router::get_route( &send_node.node.get_our_node_id(), route_params, &send_node.network_graph.read_only(), @@@ -2079,7 -2077,7 +2079,7 @@@ /// Like `get_route` above, but adds a random CLTV offset to the final hop. pub fn find_route(send_node: &Node, route_params: &RouteParameters) -> Result { let scorer = TestScorer::new(); - let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet); + let keys_manager = TestKeysInterface::new(&[0u8; 32], Network::Testnet); let random_seed_bytes = keys_manager.get_secure_random_bytes(); router::find_route( &send_node.node.get_our_node_id(), route_params, &send_node.network_graph, @@@ -2455,18 -2453,11 +2455,11 @@@ pub fn expect_payment_failed_conditions if let Some(chan_closed) = conditions.expected_blamed_chan_closed { if let PathFailure::OnPath { network_update: Some(upd) } = failure { match upd { - NetworkUpdate::ChannelUpdateMessage { ref msg } if !chan_closed => { - if let Some(scid) = conditions.expected_blamed_scid { - assert_eq!(msg.contents.short_channel_id, scid); - } - const CHAN_DISABLED_FLAG: u8 = 2; - assert_eq!(msg.contents.flags & CHAN_DISABLED_FLAG, 0); - }, - NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } if chan_closed => { + NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { if let Some(scid) = conditions.expected_blamed_scid { assert_eq!(*short_channel_id, scid); } - assert!(is_permanent); + assert_eq!(*is_permanent, chan_closed); }, _ => panic!("Unexpected update type"), } @@@ -2718,12 -2709,18 +2711,12 @@@ pub fn send_along_route<'a, 'b, 'c>(ori (our_payment_preimage, our_payment_hash, our_payment_secret, payment_id) } -pub fn do_claim_payment_along_route<'a, 'b, 'c>( - origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, - our_payment_preimage: PaymentPreimage -) -> u64 { - for path in expected_paths.iter() { - assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); +pub fn do_claim_payment_along_route(args: ClaimAlongRouteArgs) -> 
u64 { + for path in args.expected_paths.iter() { + assert_eq!(path.last().unwrap().node.get_our_node_id(), args.expected_paths[0].last().unwrap().node.get_our_node_id()); } - expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); - pass_claimed_payment_along_route( - ClaimAlongRouteArgs::new(origin_node, expected_paths, our_payment_preimage) - .skip_last(skip_last) - ) + args.expected_paths[0].last().unwrap().node.claim_funds(args.payment_preimage); + pass_claimed_payment_along_route(args) } pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { @@@ -2733,7 -2730,6 +2726,7 @@@ pub expected_min_htlc_overpay: Vec, pub skip_last: bool, pub payment_preimage: PaymentPreimage, + pub custom_tlvs: Vec<(u64, Vec)>, // Allow forwarding nodes to have taken 1 msat more fee than expected based on the downstream // fulfill amount. // @@@ -2752,7 -2748,7 +2745,7 @@@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArg Self { origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()], expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage, - allow_1_msat_fee_overpay: false, + allow_1_msat_fee_overpay: false, custom_tlvs: vec![], } } pub fn skip_last(mut self, skip_last: bool) -> Self { @@@ -2771,16 -2767,12 +2764,16 @@@ self.allow_1_msat_fee_overpay = true; self } + pub fn with_custom_tlvs(mut self, custom_tlvs: Vec<(u64, Vec)>) -> Self { + self.custom_tlvs = custom_tlvs; + self + } } -pub fn pass_claimed_payment_along_route<'a, 'b, 'c, 'd>(args: ClaimAlongRouteArgs) -> u64 { +pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { let ClaimAlongRouteArgs { origin_node, expected_paths, expected_extra_fees, expected_min_htlc_overpay, skip_last, - payment_preimage: our_payment_preimage, allow_1_msat_fee_overpay, + payment_preimage: our_payment_preimage, allow_1_msat_fee_overpay, custom_tlvs, } = args; let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events(); assert_eq!(claim_event.len(), 1); @@@ -2794,13 -2786,11 +2787,13 @@@ | PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(preimage), .. }, amount_msat, ref htlcs, + ref onion_fields, .. } => { assert_eq!(preimage, our_payment_preimage); assert_eq!(htlcs.len(), expected_paths.len()); // One per path. assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::(), amount_msat); + assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs); expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc)); fwd_amt_msat = amount_msat; }, @@@ -2811,13 -2801,11 +2804,13 @@@ payment_hash, amount_msat, ref htlcs, + ref onion_fields, .. } => { assert_eq!(&payment_hash.0, &Sha256::hash(&our_payment_preimage.0)[..]); assert_eq!(htlcs.len(), expected_paths.len()); // One per path. 
assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::(), amount_msat); + assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs); expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc)); fwd_amt_msat = amount_msat; } @@@ -2961,20 -2949,15 +2954,20 @@@ expected_total_fee_msat } -pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_preimage: PaymentPreimage) { - let expected_total_fee_msat = do_claim_payment_along_route(origin_node, expected_paths, skip_last, our_payment_preimage); +pub fn claim_payment_along_route(args: ClaimAlongRouteArgs) { + let origin_node = args.origin_node; + let payment_preimage = args.payment_preimage; + let skip_last = args.skip_last; + let expected_total_fee_msat = do_claim_payment_along_route(args); if !skip_last { - expect_payment_sent!(origin_node, our_payment_preimage, Some(expected_total_fee_msat)); + expect_payment_sent!(origin_node, payment_preimage, Some(expected_total_fee_msat)); } } pub fn claim_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], our_payment_preimage: PaymentPreimage) { - claim_payment_along_route(origin_node, &[expected_route], false, our_payment_preimage); + claim_payment_along_route( + ClaimAlongRouteArgs::new(origin_node, &[expected_route], our_payment_preimage) + ); } pub const TEST_FINAL_CLTV: u32 = 70; @@@ -3259,34 -3242,30 +3252,34 @@@ pub fn create_network<'a, 'b: 'a, 'c: ' for i in 0..node_count { for j in (i+1)..node_count { - let node_id_i = nodes[i].node.get_our_node_id(); - let node_id_j = nodes[j].node.get_our_node_id(); - - let init_i = msgs::Init { - features: nodes[i].init_features(&node_id_j), - networks: None, - remote_network_address: None, - }; - let init_j = msgs::Init { - features: nodes[j].init_features(&node_id_i), - networks: None, - remote_network_address: None, - }; - - nodes[i].node.peer_connected(&node_id_j, &init_j, true).unwrap(); - nodes[j].node.peer_connected(&node_id_i, &init_i, false).unwrap(); - nodes[i].onion_messenger.peer_connected(&node_id_j, &init_j, true).unwrap(); - nodes[j].onion_messenger.peer_connected(&node_id_i, &init_i, false).unwrap(); + connect_nodes(&nodes[i], &nodes[j]); } } nodes } +fn connect_nodes<'a, 'b: 'a, 'c: 'b>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>) { + let node_id_a = node_a.node.get_our_node_id(); + let node_id_b = node_b.node.get_our_node_id(); + + let init_a = msgs::Init { + features: node_a.init_features(&node_id_b), + networks: None, + remote_network_address: None, + }; + let init_b = msgs::Init { + features: node_b.init_features(&node_id_a), + networks: None, + remote_network_address: None, + }; + + node_a.node.peer_connected(&node_id_b, &init_b, true).unwrap(); + node_b.node.peer_connected(&node_id_a, &init_a, false).unwrap(); + node_a.onion_messenger.peer_connected(&node_id_b, &init_b, true).unwrap(); + node_b.onion_messenger.peer_connected(&node_id_a, &init_a, false).unwrap(); +} + pub fn connect_dummy_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) { let node_id_dummy = PublicKey::from_slice(&[2; 33]).unwrap(); @@@ -3647,8 -3626,13 +3640,8 @@@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>( pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa, pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor, } = args; - node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { - features: node_b.node.init_features(), 
networks: None, remote_network_address: None - }, true).unwrap(); + connect_nodes(node_a, node_b); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); - node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { - features: node_a.node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); if send_channel_ready.0 { @@@ -3862,7 -3846,7 +3855,7 @@@ pub fn create_batch_channel_funding<'a assert_eq!(channel_value_satoshis, event_channel_value_satoshis); assert_eq!(user_channel_id, event_user_channel_id); tx_outs.push(TxOut { - value: *channel_value_satoshis, script_pubkey: output_script.clone(), + value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(), }); }, _ => panic!("Unexpected event"), @@@ -3872,7 -3856,7 +3865,7 @@@ // Compose the batch funding transaction and give it to the ChannelManager. let tx = Transaction { - version: 2, + version: transaction::Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: tx_outs, diff --combined lightning/src/routing/gossip.rs index 25ee1b97f,e76c12e3e..c7908d004 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@@ -9,42 -9,41 +9,42 @@@ //! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers +use bitcoin::amount::Amount; use bitcoin::blockdata::constants::ChainHash; + use bitcoin::secp256k1; use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; - use bitcoin::secp256k1::{PublicKey, Verification}; use bitcoin::secp256k1::Secp256k1; - use bitcoin::secp256k1; + use bitcoin::secp256k1::{PublicKey, Verification}; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; -use bitcoin::network::constants::Network; +use bitcoin::network::Network; use crate::events::{MessageSendEvent, MessageSendEventsProvider}; - use crate::ln::types::ChannelId; - use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; - use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT}; - use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter}; - use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; + use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; use crate::ln::msgs; + use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement}; + use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT}; + use crate::ln::msgs::{QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd}; + use crate::ln::types::ChannelId; use crate::routing::utxo::{self, UtxoLookup, UtxoResolver}; - use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable}; - use crate::util::logger::{Logger, Level}; + use crate::util::indexed_map::{Entry as IndexedMapEntry, IndexedMap}; + use crate::util::logger::{Level, Logger}; use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; + use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer}; use crate::util::string::PrintableString; - use crate::util::indexed_map::{IndexedMap, Entry as IndexedMapEntry}; use crate::io; use crate::io_extras::{copy, sink}; use crate::prelude::*; - use core::{cmp, fmt}; - use 
crate::sync::{RwLock, RwLockReadGuard, LockTestExt}; - #[cfg(feature = "std")] - use core::sync::atomic::{AtomicUsize, Ordering}; use crate::sync::Mutex; + use crate::sync::{LockTestExt, RwLock, RwLockReadGuard}; use core::ops::{Bound, Deref}; use core::str::FromStr; + #[cfg(feature = "std")] + use core::sync::atomic::{AtomicUsize, Ordering}; + use core::{cmp, fmt}; #[cfg(feature = "std")] use std::time::{SystemTime, UNIX_EPOCH}; @@@ -219,12 -218,6 +219,6 @@@ pub struct ReadOnlyNetworkGraph<'a> /// [BOLT #4]: https://github.com/lightning/bolts/blob/master/04-onion-routing.md #[derive(Clone, Debug, PartialEq, Eq)] pub enum NetworkUpdate { - /// An error indicating a `channel_update` messages should be applied via - /// [`NetworkGraph::update_channel`]. - ChannelUpdateMessage { - /// The update to apply via [`NetworkGraph::update_channel`]. - msg: ChannelUpdate, - }, /// An error indicating that a channel failed to route a payment, which should be applied via /// [`NetworkGraph::channel_failed_permanent`] if permanent. ChannelFailure { @@@ -245,19 -238,69 +239,69 @@@ } } - impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate, - (0, ChannelUpdateMessage) => { - (0, msg, required), - }, - (2, ChannelFailure) => { - (0, short_channel_id, required), - (2, is_permanent, required), - }, - (4, NodeFailure) => { - (0, node_id, required), - (2, is_permanent, required), - }, - ); + impl Writeable for NetworkUpdate { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + Self::ChannelFailure { short_channel_id, is_permanent } => { + 2u8.write(writer)?; + write_tlv_fields!(writer, { + (0, short_channel_id, required), + (2, is_permanent, required), + }); + }, + Self::NodeFailure { node_id, is_permanent } => { + 4u8.write(writer)?; + write_tlv_fields!(writer, { + (0, node_id, required), + (2, is_permanent, required), + }); + } + } + Ok(()) + } + } + + impl MaybeReadable for NetworkUpdate { + fn read(reader: &mut R) -> Result, DecodeError> { + let id: u8 = Readable::read(reader)?; + match id { + 0 => { + // 0 was previously used for network updates containing a channel update, subsequently + // removed in LDK version 0.0.124. + let mut msg: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, msg, required), + }); + Ok(Some(Self::ChannelFailure { + short_channel_id: msg.0.unwrap().contents.short_channel_id, + is_permanent: false + })) + }, + 2 => { + _init_and_read_len_prefixed_tlv_fields!(reader, { + (0, short_channel_id, required), + (2, is_permanent, required), + }); + Ok(Some(Self::ChannelFailure { + short_channel_id: short_channel_id.0.unwrap(), + is_permanent: is_permanent.0.unwrap(), + })) + }, + 4 => { + _init_and_read_len_prefixed_tlv_fields!(reader, { + (0, node_id, required), + (2, is_permanent, required), + }); + Ok(Some(Self::NodeFailure { + node_id: node_id.0.unwrap(), + is_permanent: is_permanent.0.unwrap(), + })) + } + t if t % 2 == 0 => Err(DecodeError::UnknownRequiredFeature), + _ => Ok(None), + } + } + } /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. @@@ -354,19 -397,10 +398,10 @@@ where U::Target: UtxoLookup, L::Target impl NetworkGraph where L::Target: Logger { /// Handles any network updates originating from [`Event`]s. - // - /// Note that this will skip applying any [`NetworkUpdate::ChannelUpdateMessage`] to avoid - /// leaking possibly identifying information of the sender to the public network. 
/// /// [`Event`]: crate::events::Event pub fn handle_network_update(&self, network_update: &NetworkUpdate) { match *network_update { - NetworkUpdate::ChannelUpdateMessage { ref msg } => { - let short_channel_id = msg.contents.short_channel_id; - let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1); - let status = if is_enabled { "enabled" } else { "disabled" }; - log_debug!(self.logger, "Skipping application of a channel update from a payment failure. Channel {} is {}.", short_channel_id, status); - }, NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => { if is_permanent { log_debug!(self.logger, "Removing channel graph entry for {} due to a payment failure.", short_channel_id); @@@ -505,8 -539,8 +540,8 @@@ where U::Target: UtxoLookup, L::Target }; for (_, ref node) in iter { if let Some(node_info) = node.announcement_info.as_ref() { - if let Some(msg) = node_info.announcement_message.clone() { - return Some(msg); + if let NodeAnnouncementInfo::Relayed(announcement) = node_info { + return Some(announcement.clone()); } } } @@@ -1135,136 -1169,45 +1170,136 @@@ impl_writeable_tlv_based!(RoutingFees, }); #[derive(Clone, Debug, PartialEq, Eq)] -/// Information received in the latest node_announcement from this node. -pub struct NodeAnnouncementInfo { +/// Non-relayable information received in the latest node_announcement from this node. +pub struct NodeAnnouncementDetails { /// Protocol features the node announced support for pub features: NodeFeatures, + /// When the last known update to the node state was issued. /// Value is opaque, as set in the announcement. pub last_update: u32, + /// Color assigned to the node pub rgb: [u8; 3], + /// Moniker assigned to the node. /// May be invalid or malicious (eg control chars), /// should not be exposed to the user. pub alias: NodeAlias, + + /// Internet-level addresses via which one can connect to the node + pub addresses: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +/// Information received in the latest node_announcement from this node. +pub enum NodeAnnouncementInfo { /// An initial announcement of the node - /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. - pub announcement_message: Option + Relayed(NodeAnnouncement), + + /// Non-relayable information received in the latest node_announcement from this node. + Local(NodeAnnouncementDetails), } impl NodeAnnouncementInfo { + + /// Protocol features the node announced support for + pub fn features(&self) -> &NodeFeatures { + match self { + NodeAnnouncementInfo::Relayed(relayed) => { + &relayed.contents.features + } + NodeAnnouncementInfo::Local(local) => { + &local.features + } + } + } + + /// When the last known update to the node state was issued. + /// + /// Value may or may not be a timestamp, depending on the policy of the origin node. + pub fn last_update(&self) -> u32 { + match self { + NodeAnnouncementInfo::Relayed(relayed) => { + relayed.contents.timestamp + } + NodeAnnouncementInfo::Local(local) => { + local.last_update + } + } + } + + /// Color assigned to the node + pub fn rgb(&self) -> [u8; 3] { + match self { + NodeAnnouncementInfo::Relayed(relayed) => { + relayed.contents.rgb + } + NodeAnnouncementInfo::Local(local) => { + local.rgb + } + } + } + + /// Moniker assigned to the node. + /// + /// May be invalid or malicious (eg control chars), should not be exposed to the user. 
+ pub fn alias(&self) -> &NodeAlias { + match self { + NodeAnnouncementInfo::Relayed(relayed) => { + &relayed.contents.alias + } + NodeAnnouncementInfo::Local(local) => { + &local.alias + } + } + } + /// Internet-level addresses via which one can connect to the node - pub fn addresses(&self) -> &[SocketAddress] { - self.announcement_message.as_ref() - .map(|msg| msg.contents.addresses.as_slice()) - .unwrap_or_default() + pub fn addresses(&self) -> &Vec { + match self { + NodeAnnouncementInfo::Relayed(relayed) => { + &relayed.contents.addresses + } + NodeAnnouncementInfo::Local(local) => { + &local.addresses + } + } + } + + /// An initial announcement of the node + /// + /// Not stored if contains excess data to prevent DoS. + pub fn announcement_message(&self) -> Option<&NodeAnnouncement> { + match self { + NodeAnnouncementInfo::Relayed(announcement) => { + Some(announcement) + } + NodeAnnouncementInfo::Local(_) => { + None + } + } } } impl Writeable for NodeAnnouncementInfo { fn write(&self, writer: &mut W) -> Result<(), io::Error> { - let empty_addresses = Vec::::new(); + let features = self.features(); + let last_update = self.last_update(); + let rgb = self.rgb(); + let alias = self.alias(); + let addresses = self.addresses(); + let announcement_message = self.announcement_message(); + write_tlv_fields!(writer, { - (0, self.features, required), - (2, self.last_update, required), - (4, self.rgb, required), - (6, self.alias, required), - (8, self.announcement_message, option), - (10, empty_addresses, required_vec), // Versions prior to 0.0.115 require this field + (0, features, required), + (2, last_update, required), + (4, rgb, required), + (6, alias, required), + (8, announcement_message, option), + (10, *addresses, required_vec), // Versions 0.0.115 through 0.0.123 only serialized an empty vec }); Ok(()) } @@@ -1278,19 -1221,11 +1313,19 @@@ impl Readable for NodeAnnouncementInfo (4, rgb, required), (6, alias, required), (8, announcement_message, option), - (10, _addresses, optional_vec), // deprecated, not used anymore + (10, addresses, required_vec), }); - let _: Option> = _addresses; - Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(), - alias: alias.0.unwrap(), announcement_message }) + if let Some(announcement) = announcement_message { + Ok(Self::Relayed(announcement)) + } else { + Ok(Self::Local(NodeAnnouncementDetails { + features: features.0.unwrap(), + last_update: last_update.0.unwrap(), + rgb: rgb.0.unwrap(), + alias: alias.0.unwrap(), + addresses, + })) + } } } @@@ -1592,29 -1527,24 +1627,29 @@@ impl NetworkGraph where L: // The timestamp field is somewhat of a misnomer - the BOLTs use it to order // updates to ensure you always have the latest one, only vaguely suggesting // that it be at least the current time. 
- if node_info.last_update > msg.timestamp { + if node_info.last_update() > msg.timestamp { return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); - } else if node_info.last_update == msg.timestamp { + } else if node_info.last_update() == msg.timestamp { return Err(LightningError{err: "Update had the same timestamp as last processed update".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } } let should_relay = msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && - msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && - msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY; - node.announcement_info = Some(NodeAnnouncementInfo { - features: msg.features.clone(), - last_update: msg.timestamp, - rgb: msg.rgb, - alias: msg.alias, - announcement_message: if should_relay { full_msg.cloned() } else { None }, - }); + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY && + msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY; + + node.announcement_info = if let (Some(signed_announcement), true) = (full_msg, should_relay) { + Some(NodeAnnouncementInfo::Relayed(signed_announcement.clone())) + } else { + Some(NodeAnnouncementInfo::Local(NodeAnnouncementDetails { + features: msg.features.clone(), + last_update: msg.timestamp, + rgb: msg.rgb, + alias: msg.alias, + addresses: msg.addresses.clone(), + })) + }; Ok(()) } @@@ -1694,7 -1624,7 +1729,7 @@@ self.add_channel_between_nodes(short_channel_id, channel_info, None) } - fn add_channel_between_nodes(&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option) -> Result<(), LightningError> { + fn add_channel_between_nodes(&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option) -> Result<(), LightningError> { let mut channels = self.channels.write().unwrap(); let mut nodes = self.nodes.write().unwrap(); @@@ -1823,7 -1753,7 +1858,7 @@@ one_to_two: None, node_two: msg.node_id_2, two_to_one: None, - capacity_sats: utxo_value, + capacity_sats: utxo_value.map(|a| a.to_sat()), announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { full_msg.cloned() } else { None }, announcement_received_time, @@@ -2236,8 -2166,7 +2271,8 @@@ pub(crate) mod tests use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::hashes::hex::FromHex; - use bitcoin::network::constants::Network; + use bitcoin::network::Network; + use bitcoin::amount::Amount; use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::ScriptBuf; use bitcoin::blockdata::transaction::TxOut; @@@ -2330,7 -2259,7 +2365,7 @@@ let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap(); make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey), - &PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_v0_p2wsh() + &PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_p2wsh() } pub(crate) fn get_signed_channel_update(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1) -> ChannelUpdate { @@@ -2463,7 -2392,7 +2498,7 @@@ // Now test if the transaction is found in the UTXO set and the script is correct. 
*chain_source.utxo_ret.lock().unwrap() = - UtxoResult::Sync(Ok(TxOut { value: 0, script_pubkey: good_script.clone() })); + UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script.clone() })); let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| { unsigned_announcement.short_channel_id += 2; }, node_1_privkey, node_2_privkey, &secp_ctx); @@@ -2482,7 -2411,7 +2517,7 @@@ // If we receive announcement for the same channel, once we've validated it against the // chain, we simply ignore all new (duplicate) announcements. *chain_source.utxo_ret.lock().unwrap() = - UtxoResult::Sync(Ok(TxOut { value: 0, script_pubkey: good_script })); + UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); match gossip_sync.handle_channel_announcement(&valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Already have chain-validated channel") @@@ -2559,7 -2488,7 +2594,7 @@@ let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let amount_sats = 1000_000; + let amount_sats = Amount::from_sat(1000_000); let short_channel_id; { @@@ -2623,7 -2552,7 +2658,7 @@@ }; let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| { - unsigned_channel_update.htlc_maximum_msat = amount_sats * 1000 + 1; + unsigned_channel_update.htlc_maximum_msat = amount_sats.to_sat() * 1000 + 1; unsigned_channel_update.timestamp += 110; }, node_1_privkey, &secp_ctx); match gossip_sync.handle_channel_update(&valid_channel_update) { @@@ -2681,8 -2610,7 +2716,7 @@@ let short_channel_id; { - // Check we won't apply an update via `handle_network_update` for privacy reasons, but - // can continue fine if we manually apply it. + // Check that we can manually apply a channel update. let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); short_channel_id = valid_channel_announcement.contents.short_channel_id; let chain_source: Option<&test_utils::TestChainSource> = None; @@@ -2690,14 -2618,10 +2724,10 @@@ assert!(network_graph.read_only().channels().get(&short_channel_id).is_some()); let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx); - assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); - - network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage { - msg: valid_channel_update.clone(), - }); assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none()); network_graph.update_channel(&valid_channel_update).unwrap(); + assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some()); } // Non-permanent failure doesn't touch the channel at all @@@ -3558,7 -3482,13 +3588,7 @@@ // 1. 
Check we can read a valid NodeAnnouncementInfo and fail on an invalid one let announcement_message = >::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap(); let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap(); - let valid_node_ann_info = NodeAnnouncementInfo { - features: channelmanager::provided_node_features(&UserConfig::default()), - last_update: 0, - rgb: [0u8; 3], - alias: NodeAlias([0u8; 32]), - announcement_message: Some(announcement_message) - }; + let valid_node_ann_info = NodeAnnouncementInfo::Relayed(announcement_message); let mut encoded_valid_node_ann_info = Vec::new(); assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok()); @@@ -3591,8 -3521,8 +3621,8 @@@ let old_ann_info_with_addresses = >::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap(); let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice()) .expect("to be able to read an old NodeAnnouncementInfo with addresses"); - // This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty - assert!(ann_info_with_addresses.addresses().is_empty()); + // This serialized info has no announcement_message but its address field should still be considered + assert!(!ann_info_with_addresses.addresses().is_empty()); } #[test]
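
Usage note (editor's addition, not part of the patch): with `NetworkUpdate::ChannelUpdateMessage` removed, an event consumer that previously special-cased channel updates from onion errors now only ever sees channel or node failures and can hand them straight to the graph. The sketch below is a minimal illustration assuming only items visible in the diff — the two remaining `NetworkUpdate` variants and `NetworkGraph::handle_network_update` — while the free function name itself is hypothetical and not part of LDK.

use core::ops::Deref;

use lightning::routing::gossip::{NetworkGraph, NetworkUpdate};
use lightning::util::logger::Logger;

/// Hypothetical helper: forward a payment-failure `NetworkUpdate` to the graph.
pub fn on_path_failure_update<L: Deref>(graph: &NetworkGraph<L>, update: &NetworkUpdate)
where L::Target: Logger {
	if let NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } = update {
		// A permanent failure removes the channel from the local graph; a
		// transient one is only used to avoid the channel on retries.
		let _ = (short_channel_id, is_permanent);
	}
	if let NetworkUpdate::NodeFailure { node_id, is_permanent } = update {
		let _ = (node_id, is_permanent);
	}
	// `handle_network_update` performs the actual graph mutation shown in the
	// gossip.rs hunk above (e.g. removing the channel on a permanent failure).
	graph.handle_network_update(update);
}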