From: optout <13562139+optout21@users.noreply.github.com> Date: Fri, 25 Aug 2023 23:30:40 +0000 (+0200) Subject: Use new ChannelId type X-Git-Tag: v0.0.117-alpha1~37^2 X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=e99e6ab562af9b9a7e0f2d0dc1fee3dd09bb4082;p=rust-lightning Use new ChannelId type --- diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index cf8060ab6..67ff2a587 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -34,7 +34,7 @@ use lightning::chain::chainmonitor; use lightning::chain::transaction::OutPoint; use lightning::sign::{InMemorySigner, Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider}; use lightning::events::Event; -use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentId, RecipientOnionFields, Retry}; use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler}; use lightning::ln::msgs::{self, DecodeError}; @@ -481,7 +481,7 @@ pub fn do_test(data: &[u8], logger: &Arc) { let mut should_forward = false; let mut payments_received: Vec = Vec::new(); let mut payments_sent = 0; - let mut pending_funding_generation: Vec<([u8; 32], PublicKey, u64, Script)> = Vec::new(); + let mut pending_funding_generation: Vec<(ChannelId, PublicKey, u64, Script)> = Vec::new(); let mut pending_funding_signatures = HashMap::new(); loop { diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 6195e7366..830f6f4e2 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -13,6 +13,7 @@ use bitcoin::hash_types::BlockHash; use lightning::blinded_path::{BlindedHop, BlindedPath}; use lightning::chain::transaction::OutPoint; +use lightning::ln::ChannelId; use lightning::ln::channelmanager::{self, ChannelDetails, ChannelCounterparty}; use lightning::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures}; use lightning::ln::msgs; @@ -210,7 +211,7 @@ pub fn do_test(data: &[u8], out: Out) { let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap(); let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap()); $first_hops_vec.push(ChannelDetails { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), counterparty: ChannelCounterparty { node_id: *rnid, features: channelmanager::provided_init_features(&UserConfig::default()), diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs index cc38e67c2..744c2654c 100644 --- a/lightning-invoice/src/utils.rs +++ b/lightning-invoice/src/utils.rs @@ -627,7 +627,7 @@ where log_trace!(logger, "Considering {} channels for invoice route hints", channels.len()); for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) { if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() { - log_trace!(logger, "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id)); + log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id); continue; } @@ -641,7 +641,7 @@ where // If any public channel exists, return no hints and let the sender // look at the public channels instead. 
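
For reference, the fuzz hunks above swap bare `[u8; 32]` channel IDs for the new `ChannelId` newtype; where the router fuzzer previously wrote `[0; 32]` it now calls `ChannelId::new_zero()`. A minimal standalone sketch of that usage (illustrative, not part of the patch):

    use lightning::ln::ChannelId;

    fn placeholder_channel_id() -> ChannelId {
        // All-zero placeholder, matching the `[0; 32]` literal it replaces in the
        // router fuzzer's synthetic `ChannelDetails`.
        let id = ChannelId::new_zero();
        // The raw bytes stay reachable through the public tuple field.
        debug_assert_eq!(id.0, [0u8; 32]);
        id
    }
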
log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}", - log_bytes!(channel.channel_id)); + &channel.channel_id); return vec![].into_iter().take(MAX_CHANNEL_HINTS).map(route_hint_from_channel); } } @@ -681,18 +681,18 @@ where log_trace!(logger, "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), - log_bytes!(channel.channel_id), channel.short_channel_id, + &channel.channel_id, channel.short_channel_id, channel.inbound_capacity_msat, - log_bytes!(entry.get().channel_id), entry.get().short_channel_id, + &entry.get().channel_id, entry.get().short_channel_id, current_max_capacity); entry.insert(channel); } else { log_trace!(logger, "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), - log_bytes!(entry.get().channel_id), entry.get().short_channel_id, + &entry.get().channel_id, entry.get().short_channel_id, current_max_capacity, - log_bytes!(channel.channel_id), channel.short_channel_id, + &channel.channel_id, channel.short_channel_id, channel.inbound_capacity_msat); } } @@ -731,14 +731,14 @@ where if include_channel { log_trace!(logger, "Including channel {} in invoice route hints", - log_bytes!(channel.channel_id)); + &channel.channel_id); } else if !has_enough_capacity { log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints", - log_bytes!(channel.channel_id)); + &channel.channel_id); } else { debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public)); log_trace!(logger, "Ignoring channel {} with disconnected peer", - log_bytes!(channel.channel_id)); + &channel.channel_id); } include_channel diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index da3970da8..47f5605ed 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2642,7 +2642,7 @@ impl ChannelMonitorImpl { } } else if !self.holder_tx_signed { log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); - log_error!(logger, " in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id())); + log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id()); log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!"); } else { // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager @@ -3389,7 +3389,7 @@ impl ChannelMonitorImpl { if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 { let mut balance_spendable_csv = None; log_info!(logger, "Channel {} closed by funding output spend in txid {}.", - log_bytes!(self.funding_info.0.to_channel_id()), txid); + &self.funding_info.0.to_channel_id(), txid); self.funding_spend_seen = true; let mut commitment_tx_to_counterparty_output = None; if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 { diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs index ce449a410..22e7ec2c2 100644 --- a/lightning/src/chain/transaction.rs +++ b/lightning/src/chain/transaction.rs @@ -9,7 +9,9 @@ //! Types describing on-chain transactions. 
+use crate::ln::ChannelId; use bitcoin::hash_types::Txid; +use bitcoin::hashes::Hash; use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint; use bitcoin::blockdata::transaction::Transaction; @@ -57,12 +59,8 @@ pub struct OutPoint { impl OutPoint { /// Convert an `OutPoint` to a lightning channel id. - pub fn to_channel_id(&self) -> [u8; 32] { - let mut res = [0; 32]; - res[..].copy_from_slice(&self.txid[..]); - res[30] ^= ((self.index >> 8) & 0xff) as u8; - res[31] ^= ((self.index >> 0) & 0xff) as u8; - res + pub fn to_channel_id(&self) -> ChannelId { + ChannelId::v1_from_funding_txid(&self.txid.as_inner(), self.index) } /// Converts this OutPoint into the OutPoint field as used by rust-bitcoin @@ -94,10 +92,10 @@ mod tests { assert_eq!(&OutPoint { txid: tx.txid(), index: 0 - }.to_channel_id(), &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]); + }.to_channel_id().0[..], &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]); assert_eq!(&OutPoint { txid: tx.txid(), index: 1 - }.to_channel_id(), &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]); + }.to_channel_id().0[..], &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]); } } diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 36c0f20d2..95f2eb357 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -23,7 +23,7 @@ use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs; -use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; +use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret}; use crate::routing::gossip::NetworkUpdate; use crate::util::errors::APIError; use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength}; @@ -83,7 +83,7 @@ impl_writeable_tlv_based_enum!(PaymentPurpose, #[derive(Clone, Debug, PartialEq, Eq)] pub struct ClaimedHTLC { /// The `channel_id` of the channel over which the HTLC was received. - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The `user_channel_id` of the channel over which the HTLC was received. This is the value /// passed in to [`ChannelManager::create_channel`] for outbound channels, or to /// [`ChannelManager::accept_inbound_channel`] for inbound channels if @@ -246,7 +246,7 @@ pub enum HTLCDestination { /// counterparty node information. node_id: Option, /// The outgoing `channel_id` between us and the next node. - channel_id: [u8; 32], + channel_id: ChannelId, }, /// Scenario where we are unsure of the next node to forward the HTLC to. UnknownNextHop { @@ -364,7 +364,7 @@ pub enum Event { /// [`ChannelManager::funding_transaction_generated`]. /// /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated - temporary_channel_id: [u8; 32], + temporary_channel_id: ChannelId, /// The counterparty's node_id, which you'll need to pass back into /// [`ChannelManager::funding_transaction_generated`]. /// @@ -458,7 +458,7 @@ pub enum Event { /// payment is to pay an invoice or to send a spontaneous payment. purpose: PaymentPurpose, /// The `channel_id` indicating over which channel we received the payment. 
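
For reference, the derivation that `OutPoint::to_channel_id` previously inlined, and that `ChannelId::v1_from_funding_txid` now presumably encapsulates, XORs the big-endian funding output index into the last two bytes of the funding txid. A standalone sketch of that removed logic:

    fn v1_channel_id_bytes(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
        // Copy the txid bytes, then XOR the output index into the last two
        // bytes, exactly as the removed inline code did.
        let mut res = funding_txid;
        res[30] ^= (output_index >> 8) as u8;
        res[31] ^= (output_index & 0xff) as u8;
        res
    }
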
- via_channel_id: Option<[u8; 32]>, + via_channel_id: Option, /// The `user_channel_id` indicating over which channel we received the payment. via_user_channel_id: Option, /// The block height at which this payment will be failed back and will no longer be @@ -718,17 +718,17 @@ pub enum Event { /// The `channel_id` indicating which channel the spendable outputs belong to. /// /// This will always be `Some` for events generated by LDK versions 0.0.117 and above. - channel_id: Option<[u8; 32]>, + channel_id: Option, }, /// This event is generated when a payment has been successfully forwarded through us and a /// forwarding fee earned. PaymentForwarded { /// The incoming channel between the previous node and us. This is only `None` for events /// generated or serialized by versions prior to 0.0.107. - prev_channel_id: Option<[u8; 32]>, + prev_channel_id: Option, /// The outgoing channel between the next node and us. This is only `None` for events /// generated or serialized by versions prior to 0.0.107. - next_channel_id: Option<[u8; 32]>, + next_channel_id: Option, /// The fee, in milli-satoshis, which was earned as a result of the payment. /// /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC @@ -759,7 +759,7 @@ pub enum Event { /// [`Event::ChannelReady`] event. ChannelPending { /// The `channel_id` of the channel that is pending confirmation. - channel_id: [u8; 32], + channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise @@ -772,7 +772,7 @@ pub enum Event { /// The `temporary_channel_id` this channel used to be known by during channel establishment. /// /// Will be `None` for channels created prior to LDK version 0.0.115. - former_temporary_channel_id: Option<[u8; 32]>, + former_temporary_channel_id: Option, /// The `node_id` of the channel counterparty. counterparty_node_id: PublicKey, /// The outpoint of the channel's funding transaction. @@ -784,7 +784,7 @@ pub enum Event { /// establishment. ChannelReady { /// The `channel_id` of the channel that is ready. - channel_id: [u8; 32], + channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise @@ -811,7 +811,7 @@ pub enum Event { ChannelClosed { /// The `channel_id` of the channel which has been closed. Note that on-chain transactions /// resolving the channel are likely still awaiting confirmation. - channel_id: [u8; 32], + channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise @@ -838,7 +838,7 @@ pub enum Event { /// inputs for another purpose. DiscardFunding { /// The channel_id of the channel which has been closed. 
- channel_id: [u8; 32], + channel_id: ChannelId, /// The full transaction received from the user transaction: Transaction }, @@ -863,7 +863,7 @@ pub enum Event { /// /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn - temporary_channel_id: [u8; 32], + temporary_channel_id: ChannelId, /// The node_id of the counterparty requesting to open the channel. /// /// When responding to the request, the `counterparty_node_id` should be passed @@ -909,7 +909,7 @@ pub enum Event { /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon). HTLCHandlingFailed { /// The channel over which the HTLC was received. - prev_channel_id: [u8; 32], + prev_channel_id: ChannelId, /// Destination of the HTLC that failed to be processed. failed_next_destination: HTLCDestination, }, @@ -1279,7 +1279,7 @@ impl MaybeReadable for Event { 5u8 => { let f = || { let mut outputs = WithoutLength(Vec::new()); - let mut channel_id: Option<[u8; 32]> = None; + let mut channel_id: Option = None; read_tlv_fields!(reader, { (0, outputs, required), (1, channel_id, option), @@ -1335,7 +1335,7 @@ impl MaybeReadable for Event { }, 9u8 => { let f = || { - let mut channel_id = [0; 32]; + let mut channel_id = ChannelId::new_zero(); let mut reason = UpgradableRequired(None); let mut user_channel_id_low_opt: Option = None; let mut user_channel_id_high_opt: Option = None; @@ -1363,7 +1363,7 @@ impl MaybeReadable for Event { }, 11u8 => { let f = || { - let mut channel_id = [0; 32]; + let mut channel_id = ChannelId::new_zero(); let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() }; read_tlv_fields!(reader, { (0, channel_id, required), @@ -1474,7 +1474,7 @@ impl MaybeReadable for Event { }, 25u8 => { let f = || { - let mut prev_channel_id = [0; 32]; + let mut prev_channel_id = ChannelId::new_zero(); let mut failed_next_destination_opt = UpgradableRequired(None); read_tlv_fields!(reader, { (0, prev_channel_id, required), @@ -1490,7 +1490,7 @@ impl MaybeReadable for Event { 27u8 => Ok(None), 29u8 => { let f = || { - let mut channel_id = [0; 32]; + let mut channel_id = ChannelId::new_zero(); let mut user_channel_id: u128 = 0; let mut counterparty_node_id = RequiredWrapper(None); let mut channel_type = RequiredWrapper(None); @@ -1512,7 +1512,7 @@ impl MaybeReadable for Event { }, 31u8 => { let f = || { - let mut channel_id = [0; 32]; + let mut channel_id = ChannelId::new_zero(); let mut user_channel_id: u128 = 0; let mut former_temporary_channel_id = None; let mut counterparty_node_id = RequiredWrapper(None); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f68d7433e..da1364021 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -22,7 +22,7 @@ use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature}; use bitcoin::secp256k1; -use crate::ln::{PaymentPreimage, PaymentHash}; +use crate::ln::{ChannelId, PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; use crate::ln::msgs::DecodeError; @@ -532,7 +532,7 @@ pub(super) struct ReestablishResponses { /// channel's counterparty_node_id and channel_id). 
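
Downstream event consumers see the same type change: the `channel_id` fields of `Event` variants above now carry `ChannelId` values instead of raw arrays. A minimal sketch of what handling such an event looks like after this patch (field set abbreviated with `..`):

    use lightning::events::Event;

    fn note_closed_channel(event: &Event) {
        if let Event::ChannelClosed { channel_id, .. } = event {
            // `channel_id` is now a `ChannelId`; its raw bytes are still
            // available through the public `.0` field.
            println!("channel {:?} closed", channel_id.0);
        }
    }
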
pub(crate) type ShutdownResult = ( Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, - Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])> + Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)> ); /// If the majority of the channels funds are to the fundee and the initiator holds only just @@ -637,8 +637,11 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { user_id: u128, - channel_id: [u8; 32], - temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115. + /// The current channel ID. + channel_id: ChannelId, + /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID. + /// Will be `None` for channels created prior to 0.0.115. + temporary_channel_id: Option, channel_state: u32, // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to @@ -957,14 +960,14 @@ impl ChannelContext where SP::Target: SignerProvider { // Public utilities: - pub fn channel_id(&self) -> [u8; 32] { + pub fn channel_id(&self) -> ChannelId { self.channel_id } // Return the `temporary_channel_id` used during channel establishment. // // Will return `None` for channels created prior to LDK version 0.0.115. - pub fn temporary_channel_id(&self) -> Option<[u8; 32]> { + pub fn temporary_channel_id(&self) -> Option { self.temporary_channel_id } @@ -1232,7 +1235,8 @@ impl ChannelContext where SP::Target: SignerProvider { log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), - log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); + &self.channel_id, + if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! get_htlc_in_commitment { ($htlc: expr, $offered: expr) => { @@ -2222,7 +2226,7 @@ impl Channel where InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill(_) = reason { } else { - log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, log_bytes!(self.context.channel_id())); + log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id()); debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); } return UpdateFulfillFetch::DuplicateClaim {}; @@ -2275,7 +2279,7 @@ impl Channel where }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id())); + log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id()); // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); @@ -2285,7 +2289,7 @@ impl Channel where _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state); + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); @@ -2303,7 +2307,7 @@ impl Channel where debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, log_bytes!(self.context.channel_id)); + log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } @@ -2442,7 +2446,7 @@ impl Channel where _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id()); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { htlc_id: htlc_id_arg, err_packet, @@ -2450,7 +2454,7 @@ impl Channel where return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); + log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id()); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); @@ -2493,7 +2497,7 @@ impl Channel where let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; @@ -2547,7 +2551,7 @@ impl Channel where self.context.cur_holder_commitment_transaction_number -= 1; self.context.cur_counterparty_commitment_transaction_number -= 1; - log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); let need_channel_ready = self.check_get_channel_ready(0).is_some(); self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); @@ -2621,7 +2625,7 @@ impl Channel where self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - log_info!(logger, "Received channel_ready from peer for 
channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id()); Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) } @@ -2749,7 +2753,7 @@ impl Channel where if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id())); + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } else { @@ -2877,7 +2881,7 @@ impl Channel where log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), - log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id()); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) { return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); } @@ -2947,7 +2951,7 @@ impl Channel where let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), - encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id())); + encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id()); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); } @@ -2991,7 +2995,7 @@ impl Channel where } else { None }; if let Some(forward_info) = new_forward { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", - &htlc.payment_hash, log_bytes!(self.context.channel_id)); + &htlc.payment_hash, &self.context.channel_id); htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); need_commitment = true; } @@ -3000,7 +3004,7 @@ impl Channel where for htlc in self.context.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", - &htlc.payment_hash, log_bytes!(self.context.channel_id)); + &htlc.payment_hash, 
&self.context.channel_id); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -3050,7 +3054,7 @@ impl Channel where monitor_update.updates.append(&mut additional_update.updates); } log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", - log_bytes!(self.context.channel_id)); + &self.context.channel_id); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -3067,7 +3071,7 @@ impl Channel where } else { false }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", - log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); + &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" }); self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -3096,7 +3100,7 @@ impl Channel where assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), - if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id())); + if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id()); let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! @@ -3127,8 +3131,7 @@ impl Channel where Err(e) => { match e { ChannelError::Ignore(ref msg) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", - &payment_hash, msg, log_bytes!(self.context.channel_id())); + log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id()); // If we fail to send here, then this HTLC should // be failed backwards. 
Failing to send here // indicates that this HTLC may keep being put back @@ -3194,7 +3197,7 @@ impl Channel where monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", - log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, + &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); @@ -3283,7 +3286,7 @@ impl Channel where self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id())); + log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id()); let mut to_forward_infos = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); @@ -3428,7 +3431,7 @@ impl Channel where self.context.monitor_pending_forwards.append(&mut to_forward_infos); self.context.monitor_pending_failures.append(&mut revoked_htlcs); self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id()); return_with_htlcs_to_fail!(Vec::new()); } @@ -3440,7 +3443,7 @@ impl Channel where monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.", - log_bytes!(self.context.channel_id()), release_state_str); + &self.context.channel_id(), release_state_str); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); return_with_htlcs_to_fail!(htlcs_to_fail); @@ -3455,7 +3458,7 @@ impl Channel where monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.", - log_bytes!(self.context.channel_id()), + &self.context.channel_id(), update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), release_state_str); @@ -3463,7 +3466,7 @@ impl Channel where return_with_htlcs_to_fail!(htlcs_to_fail); } else { log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. 
{} monitor update.", - log_bytes!(self.context.channel_id()), release_state_str); + &self.context.channel_id(), release_state_str); self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); return_with_htlcs_to_fail!(htlcs_to_fail); @@ -3625,7 +3628,7 @@ impl Channel where self.context.sent_message_awaiting_response = None; self.context.channel_state |= ChannelState::PeerDisconnected as u32; - log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id())); + log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id()); } /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. @@ -3728,7 +3731,7 @@ impl Channel where self.context.monitor_pending_commitment_signed = false; let order = self.context.resend_order.clone(); log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", - log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, + &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { @@ -3840,7 +3843,7 @@ impl Channel where } else { None }; log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, + &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, @@ -3896,8 +3899,8 @@ impl Channel where if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { macro_rules! 
log_and_panic { ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); - panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); + log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); + panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); } } log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ @@ -3996,9 +3999,9 @@ impl Channel where if msg.next_local_commitment_number == next_counterparty_commitment_number { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); } else { - log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); } Ok(ReestablishResponses { @@ -4009,9 +4012,9 @@ impl Channel where }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); } if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { @@ -4752,14 +4755,14 @@ impl Channel where // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). 
if let Some(channel_ready) = self.check_get_channel_ready(height) { - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); + log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger); return Ok((Some(channel_ready), announcement_sigs)); } } for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { - log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id())); + log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id()); return Err(ClosureReason::CommitmentTxConfirmed); } } @@ -4821,7 +4824,7 @@ impl Channel where let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); + log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); } @@ -4852,7 +4855,7 @@ impl Channel where } } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { - log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id)); + log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id); // If funding_tx_confirmed_in is unset, the channel must not be active assert!(non_shutdown_state <= ChannelState::ChannelReady as u32); assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); @@ -4962,7 +4965,7 @@ impl Channel where return None; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id())); + log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id()); let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { Ok(a) => a, Err(e) => { @@ -5097,10 +5100,10 @@ impl Channel where let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id())); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id()); remote_last_secret } else { - log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for 
channel {}", &self.context.channel_id()); [0;32] }; self.mark_awaiting_response(); @@ -5381,14 +5384,14 @@ impl Channel where log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id())); + log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id()); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), - log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id())); + log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id()); } } @@ -5636,7 +5639,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), }; - let temporary_channel_id = entropy_source.get_secure_random_bytes(); + let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source); Ok(Self { context: ChannelContext { @@ -6484,7 +6487,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), - encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + encode::serialize_hex(&funding_script), &self.context.channel_id()); secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } @@ -6494,7 +6497,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); match &self.context.holder_signer { // TODO (arik): move match into calling method for Taproot @@ -6588,7 +6591,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { self.context.cur_counterparty_commitment_transaction_number -= 1; self.context.cur_holder_commitment_transaction_number -= 1; - log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, 
"Generated funding_signed for peer for channel {}", &self.context.channel_id()); // Promote the channel to a full-fledged one now that we have updated the state and have a // `ChannelMonitor`. @@ -7258,7 +7261,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut user_id_high_opt: Option = None; let mut channel_keys_id: Option<[u8; 32]> = None; - let mut temporary_channel_id: Option<[u8; 32]> = None; + let mut temporary_channel_id: Option = None; let mut holder_max_accepted_htlcs: Option = None; let mut blocked_monitor_updates = Some(Vec::new()); diff --git a/lightning/src/ln/channel_id.rs b/lightning/src/ln/channel_id.rs index 332fc355e..621ebf0c4 100644 --- a/lightning/src/ln/channel_id.rs +++ b/lightning/src/ln/channel_id.rs @@ -29,7 +29,7 @@ use core::ops::Deref; /// /// This is not exported to bindings users as we just use [u8; 32] directly. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct ChannelId (pub [u8; 32]); +pub struct ChannelId(pub [u8; 32]); impl ChannelId { /// Create _v1_ channel ID based on a funding TX ID and output index diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 55980dd7a..742dc0344 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -39,7 +39,7 @@ use crate::events; use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. -use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; +use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] @@ -413,13 +413,13 @@ impl Into for FailureCode { struct MsgHandleErrInternal { err: msgs::LightningError, - chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed + chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, channel_capacity: Option, } impl MsgHandleErrInternal { #[inline] - fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self { + fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self { Self { err: LightningError { err: err.clone(), @@ -440,7 +440,7 @@ impl MsgHandleErrInternal { Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option, channel_capacity: u64) -> Self { + fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option, channel_capacity: u64) -> Self { Self { err: LightningError { err: err.clone(), @@ -457,7 +457,7 @@ impl MsgHandleErrInternal { } } #[inline] - fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { + fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self { Self { err: match err { ChannelError::Warn(msg) => LightningError { @@ -582,7 +582,7 @@ 
enum BackgroundEvent { /// on a channel. MonitorUpdatesComplete { counterparty_node_id: PublicKey, - channel_id: [u8; 32], + channel_id: ChannelId, }, } @@ -643,7 +643,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction { /// durably to disk. ForwardedPaymentInboundClaim { /// The upstream channel ID (i.e. the inbound edge). - channel_id: [u8; 32], + channel_id: ChannelId, /// The HTLC ID on the inbound edge. htlc_id: u64, }, @@ -669,26 +669,26 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// `channel_id` -> `Channel`. /// /// Holds all funded channels where the peer is the counterparty. - pub(super) channel_by_id: HashMap<[u8; 32], Channel>, + pub(super) channel_by_id: HashMap>, /// `temporary_channel_id` -> `OutboundV1Channel`. /// /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has /// been assigned a `channel_id`, the entry in this map is removed and one is created in /// `channel_by_id`. - pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel>, + pub(super) outbound_v1_channel_by_id: HashMap>, /// `temporary_channel_id` -> `InboundV1Channel`. /// /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has /// been assigned a `channel_id`, the entry in this map is removed and one is created in /// `channel_by_id`. - pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel>, + pub(super) inbound_v1_channel_by_id: HashMap>, /// `temporary_channel_id` -> `InboundChannelRequest`. /// /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where /// the peer is the counterparty. If the channel is accepted, then the entry in this table is /// removed, and an InboundV1Channel is created and placed in the `inbound_v1_channel_by_id` table. If /// the channel is rejected, then the entry is simply removed. - pub(super) inbound_channel_request_by_id: HashMap<[u8; 32], InboundChannelRequest>, + pub(super) inbound_channel_request_by_id: HashMap, /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, /// Messages to send to the peer - pushed to in the same lock that they are generated in (except @@ -715,12 +715,12 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure /// duplicates do not occur, so such channels should fail without a monitor update completing. - monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec>, + monitor_update_blocked_actions: BTreeMap>, /// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have /// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update /// will remove a preimage that needs to be durably in an upstream channel first), we put an /// entry here to note that the channel with the key's ID is blocked on a set of actions. - actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec>, + actions_blocking_raa_monitor_updates: BTreeMap>, /// The peer is currently connected (i.e. we've seen a /// [`ChannelMessageHandler::peer_connected`] and no corresponding /// [`ChannelMessageHandler::peer_disconnected`]. @@ -748,11 +748,11 @@ impl PeerState where SP::Target: SignerProvider { } // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer. 
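
Because `ChannelId` derives `Copy`, `Eq`, `Hash` and `Ord` (see the channel_id.rs hunk above), it can key the per-peer `HashMap`s and `BTreeMap`s that previously used `[u8; 32]`, as the `PeerState` changes above show. A small standalone sketch of that property:

    use std::collections::{BTreeMap, HashMap};
    use lightning::ln::ChannelId;

    fn keyed_by_channel_id() {
        let id = ChannelId::new_zero();
        // Hash-keyed, mirroring `channel_by_id` and friends.
        let mut channel_by_id: HashMap<ChannelId, &'static str> = HashMap::new();
        channel_by_id.insert(id, "funded channel");
        // Ord-keyed, mirroring `monitor_update_blocked_actions`.
        let mut blocked_actions: BTreeMap<ChannelId, Vec<u64>> = BTreeMap::new();
        blocked_actions.entry(id).or_default().push(0);
        assert!(channel_by_id.contains_key(&id));
    }
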
- fn has_channel(&self, channel_id: &[u8; 32]) -> bool { - self.channel_by_id.contains_key(channel_id) || - self.outbound_v1_channel_by_id.contains_key(channel_id) || - self.inbound_v1_channel_by_id.contains_key(channel_id) || - self.inbound_channel_request_by_id.contains_key(channel_id) + fn has_channel(&self, channel_id: &ChannelId) -> bool { + self.channel_by_id.contains_key(&channel_id) || + self.outbound_v1_channel_by_id.contains_key(&channel_id) || + self.inbound_v1_channel_by_id.contains_key(&channel_id) || + self.inbound_channel_request_by_id.contains_key(&channel_id) } } @@ -1099,7 +1099,7 @@ where /// required to access the channel with the `counterparty_node_id`. /// /// See `ChannelManager` struct-level documentation for lock order requirements. - id_to_peer: Mutex>, + id_to_peer: Mutex>, /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. /// @@ -1113,9 +1113,9 @@ where /// /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] - pub(super) short_to_chan_info: FairRwLock>, + pub(super) short_to_chan_info: FairRwLock>, #[cfg(not(test))] - short_to_chan_info: FairRwLock>, + short_to_chan_info: FairRwLock>, our_network_pubkey: PublicKey, @@ -1417,7 +1417,7 @@ pub struct ChannelDetails { /// thereafter this is the txid of the funding transaction xor the funding transaction output). /// Note that this means this value is *not* persistent - it can change once during the /// lifetime of the channel. - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// Parameters which apply to our counterparty. See individual fields for more information. pub counterparty: ChannelCounterparty, /// The Channel's funding transaction output, if we've negotiated the funding transaction with @@ -1816,7 +1816,7 @@ macro_rules! convert_chan_err { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, ChannelError::Close(msg) => { - log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing channel {} due to close-required error: {}", &$channel_id, msg); update_maps_on_chan_removal!($self, &$channel.context); let shutdown_res = $channel.context.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), @@ -1829,7 +1829,7 @@ macro_rules! convert_chan_err { // We should only ever have `ChannelError::Close` when unfunded channels error. // In any case, just close the channel. ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { - log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", &$channel_id, msg); update_maps_on_chan_removal!($self, &$channel_context); let shutdown_res = $channel_context.force_shutdown(false); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), @@ -2002,12 +2002,12 @@ macro_rules! 
handle_new_monitor_update { match $update_res { ChannelMonitorUpdateStatus::InProgress => { log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - log_bytes!($chan.context.channel_id()[..])); + &$chan.context.channel_id()); Ok(false) }, ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", - log_bytes!($chan.context.channel_id()[..])); + &$chan.context.channel_id()); update_maps_on_chan_removal!($self, &$chan.context); let res = Err(MsgHandleErrInternal::from_finish_shutdown( "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), @@ -2256,7 +2256,7 @@ where /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id - pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option) -> Result<[u8; 32], APIError> { + pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option) -> Result { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } @@ -2307,7 +2307,7 @@ where Ok(temporary_channel_id) } - fn list_funded_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_funded_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { // Allocate our best estimate of the number of channels we have in the `res` // Vec. 
Sadly the `short_to_chan_info` map doesn't cover channels without // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside @@ -2452,7 +2452,7 @@ where }, None)); } - fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { + fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; @@ -2544,7 +2544,7 @@ where /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown - pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> { + pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, None, None) } @@ -2578,7 +2578,7 @@ where /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown - pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, shutdown_script: Option) -> Result<(), APIError> { + pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, shutdown_script: Option) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script) } @@ -2603,7 +2603,7 @@ where /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
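
The public `ChannelManager` surface changes the same way: `create_channel` now returns a `ChannelId`, and the close/force-close methods take `&ChannelId`. A hypothetical sketch of how those calls compose, with a stand-in trait instead of a fully parameterized `ChannelManager` and the optional config argument of `create_channel` omitted; the two signatures mirror the hunks above:

    use bitcoin::secp256k1::PublicKey;
    use lightning::ln::ChannelId;
    use lightning::util::errors::APIError;

    // Hypothetical abstraction over the relevant `ChannelManager` methods.
    trait ChannelOps {
        fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64,
            push_msat: u64, user_channel_id: u128) -> Result<ChannelId, APIError>;
        fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
            -> Result<(), APIError>;
    }

    fn open_then_close<M: ChannelOps>(manager: &M, peer: PublicKey) -> Result<(), APIError> {
        // The ID handed back by `create_channel` is the same `ChannelId` value
        // the close and force-close methods now expect by reference.
        let channel_id = manager.create_channel(peer, 100_000, 0, 42)?;
        manager.close_channel(&channel_id, &peer)
    }
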
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) + fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(peer_node_id) @@ -2617,33 +2617,33 @@ where ClosureReason::HolderForceClosed }; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { - log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, "Force-closing channel {}", &channel_id); self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(broadcast)); (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id()) } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) { - log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, "Force-closing channel {}", &channel_id); self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) { - log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, "Force-closing channel {}", &channel_id); self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { - log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, "Force-closing channel {}", &channel_id); // N.B. that we don't send any channel close event here: we // don't have a user_channel_id, and we never sent any opening // events anyway. (None, *peer_node_id) } else { - return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); + return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) }); } }; if let Some(update) = update_opt { @@ -2656,7 +2656,7 @@ where Ok(counterparty_node_id) } - fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { + fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { @@ -2682,7 +2682,7 @@ where /// rejecting new HTLCs on the given channel. 
Fails if `channel_id` is unknown to /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding /// channel. - pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) + pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.force_close_sending_error(channel_id, counterparty_node_id, true) } @@ -2693,7 +2693,7 @@ where /// /// You can always get the latest local transaction(s) to broadcast from /// [`ChannelMonitor::get_latest_holder_commitment_txn`]. - pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) + pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.force_close_sending_error(channel_id, counterparty_node_id, false) } @@ -3133,7 +3133,7 @@ where if chan.context.get_short_channel_id().is_none() { return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}); } - log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id())); + log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id()); self.get_channel_update_for_unicast(chan) } @@ -3149,7 +3149,7 @@ where /// [`channel_update`]: msgs::ChannelUpdate /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_unicast(&self, chan: &Channel) -> Result { - log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id())); + log_trace!(self.logger, "Attempting to generate channel update for channel {}", &chan.context.channel_id()); let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, @@ -3159,7 +3159,7 @@ where } fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel) -> Result { - log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id())); + log_trace!(self.logger, "Generating channel update for channel {}", &chan.context.channel_id()); let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..]; let enabled = chan.context.is_usable() && match chan.channel_update_status() { @@ -3455,7 +3455,7 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. 
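The force-close entry points above take the same `&ChannelId` handle. A sketch, again with `channel_manager`, `channel_id` and `counterparty_node_id` assumed in scope:

    // Force-close, broadcasting our latest commitment transaction and
    // rejecting new HTLCs on the channel:
    channel_manager
        .force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id)
        .expect("channel and peer are known");
    // To close our side without broadcasting (e.g. when the counterparty is
    // expected to publish a newer state), call
    // force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id) instead.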
fn funding_transaction_generated_intern, &Transaction) -> Result>( - &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput + &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -3463,7 +3463,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) { + let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(&temporary_channel_id) { Some(chan) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; @@ -3492,7 +3492,7 @@ where return Err(APIError::ChannelUnavailable { err: format!( "Channel with id {} not found for the passed counterparty node_id {}", - log_bytes!(*temporary_channel_id), counterparty_node_id), + temporary_channel_id, counterparty_node_id), }) }, }; @@ -3517,7 +3517,7 @@ where } #[cfg(test)] - pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> { + pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> { self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| { Ok(OutPoint { txid: tx.txid(), index: output_index }) }) @@ -3553,7 +3553,7 @@ where /// /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed - pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> { + pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); for inp in funding_transaction.input.iter() { @@ -3626,7 +3626,7 @@ where /// [`ChannelUnavailable`]: APIError::ChannelUnavailable /// [`APIMisuseError`]: APIError::APIMisuseError pub fn update_partial_channel_config( - &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate, + &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate, ) -> Result<(), APIError> { if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) { return Err(APIError::APIMisuseError { @@ -3643,7 +3643,7 @@ where for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { - err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id), + err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id), }); }; } @@ -3675,7 +3675,7 @@ where return Err(APIError::ChannelUnavailable { err: format!( "Channel with ID {} for passed 
counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!", - log_bytes!(*channel_id), counterparty_node_id), + channel_id, counterparty_node_id), }); }; let mut config = context.config(); @@ -3710,7 +3710,7 @@ where /// [`ChannelUnavailable`]: APIError::ChannelUnavailable /// [`APIMisuseError`]: APIError::APIMisuseError pub fn update_channel_config( - &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig, + &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig, ) -> Result<(), APIError> { return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into()); } @@ -3740,7 +3740,7 @@ where /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat // TODO: when we move to deciding the best outbound channel at forward time, only take // `next_node_id` and not `next_hop_channel_id` - pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> { + pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let next_hop_scid = { @@ -3749,18 +3749,18 @@ where .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.get(next_hop_channel_id) { + match peer_state.channel_by_id.get(&next_hop_channel_id) { Some(chan) => { if !chan.context.is_usable() { return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) + err: format!("Channel with id {} not fully established", next_hop_channel_id) }) } chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias()) }, None => return Err(APIError::ChannelUnavailable { err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. 
Channel may still be opening.", - log_bytes!(*next_hop_channel_id), next_node_id) + next_hop_channel_id, next_node_id) }) } }; @@ -4372,21 +4372,21 @@ where let _ = self.process_background_events(); } - fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> NotifyOption { + fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel, new_feerate: u32) -> NotifyOption { if !chan.context.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } if !chan.context.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", - log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger); NotifyOption::DoPersist @@ -4514,7 +4514,7 @@ where if chan.should_disconnect_peer_awaiting_response() { log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}", - counterparty_node_id, log_bytes!(*chan_id)); + counterparty_node_id, chan_id); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::DisconnectPeerWithWarning { @@ -4530,7 +4530,7 @@ where }); let process_unfunded_channel_tick = | - chan_id: &[u8; 32], + chan_id: &ChannelId, chan_context: &mut ChannelContext, unfunded_chan_context: &mut UnfundedChannelContext, pending_msg_events: &mut Vec, @@ -4539,7 +4539,7 @@ where if unfunded_chan_context.should_expire_unfunded_channel() { log_error!(self.logger, "Force-closing pending channel with ID {} for not establishing in a timely manner", - log_bytes!(&chan_id[..])); + &chan_id); update_maps_on_chan_removal!(self, &chan_context); self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed); self.finish_force_close_channel(chan_context.force_shutdown(false)); @@ -4564,7 +4564,7 @@ where for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() { if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 { - log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", log_bytes!(&chan_id[..])); + log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id); peer_state.pending_msg_events.push( events::MessageSendEvent::HandleError { node_id: counterparty_node_id, @@ -4767,7 +4767,7 @@ where // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to // be surfaced to the user. 
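The `update_channel_config`/`update_partial_channel_config` hunks above switch to a `&[ChannelId]` slice, so ids collected from `ChannelDetails` can be passed straight through. A sketch assuming `channel_manager` and `counterparty_node_id` in scope (the fee value is arbitrary):

    use lightning::ln::ChannelId;
    use lightning::util::config::ChannelConfig;

    // Gather the ids of every channel we have with this peer...
    let channel_ids: Vec<ChannelId> = channel_manager.list_channels()
        .into_iter()
        .filter(|c| c.counterparty.node_id == counterparty_node_id)
        .map(|c| c.channel_id)
        .collect();

    // ...and apply a new forwarding-fee configuration to all of them at once.
    let mut config = ChannelConfig::default();
    config.forwarding_fee_proportional_millionths = 10_000;
    channel_manager
        .update_channel_config(&counterparty_node_id, &channel_ids, &config)
        .expect("all ids belong to this peer");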
fn fail_holding_cell_htlcs( - &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32], + &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId, counterparty_node_id: &PublicKey ) { let (failure_code, onion_failure_data) = { @@ -5043,7 +5043,7 @@ where if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res { if let Some(action) = completion_action(Some(htlc_value_msat)) { log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}", - log_bytes!(chan_id), action); + &chan_id, action); peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } if !during_init { @@ -5210,7 +5210,7 @@ where channel_ready: Option, announcement_sigs: Option) -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> { log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement", - log_bytes!(channel.context.channel_id()), + &channel.context.channel_id(), if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(), if funding_broadcastable.is_some() { "" } else { "not " }, @@ -5338,7 +5338,7 @@ where /// /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id - pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> { + pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> { self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id) } @@ -5360,11 +5360,11 @@ where /// /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id - pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> { + pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> { self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id) } - fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { + fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let peers_without_funded_channels = @@ -5730,7 +5730,7 @@ where let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer, self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id())); + log_trace!(self.logger, "Sending announcement_signatures for channel {}", &chan.get().context.channel_id()); 
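`accept_inbound_channel` and its trusted-0conf variant above likewise take `&ChannelId`. A sketch of manual acceptance, assuming `Event::OpenChannelRequest` carries the new type in its `temporary_channel_id` field (consistent with this patch, though that hunk is not shown in this part of the diff) and that `channel_manager` is in scope with manual acceptance enabled:

    use lightning::events::Event;

    // Inside an event handler.
    if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
        // The 0 here is an arbitrary user_channel_id.
        channel_manager
            .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 0)
            .expect("the pending channel is still known");
    }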
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, @@ -5741,7 +5741,7 @@ where // counterparty's announcement_signatures. Thus, we only bother to send a // channel_update here if the channel is not public, i.e. we're not sending an // announcement_signatures. - log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id())); + log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", &chan.get().context.channel_id()); if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), @@ -5775,13 +5775,13 @@ where // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per // https://github.com/lightningdevkit/rust-lightning/issues/2422 if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) { - log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); let mut chan = remove_channel!(self, chan_entry); self.finish_force_close_channel(chan.context.force_shutdown(false)); return Ok(()); } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) { - log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); let mut chan = remove_channel!(self, chan_entry); self.finish_force_close_channel(chan.context.force_shutdown(false)); @@ -5789,7 +5789,7 @@ where } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) { if !chan_entry.get().received_shutdown() { log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), + &msg.channel_id, if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } @@ -6126,7 +6126,7 @@ where /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of /// the [`ChannelMonitorUpdate`] in question. 
fn raa_monitor_updates_held(&self, - actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec>, + actions_blocking_raa_monitor_updates: &BTreeMap>, channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey ) -> bool { actions_blocking_raa_monitor_updates @@ -6253,7 +6253,7 @@ where if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); } else { - log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id)); + log_debug!(self.logger, "Received channel_update for channel {}.", &chan_id); try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan); } }, @@ -6437,7 +6437,7 @@ where if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - let channel_id: [u8; 32] = *channel_id; + let channel_id: ChannelId = *channel_id; let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING, peer_state.channel_by_id.remove(&channel_id)); @@ -6771,7 +6771,7 @@ where // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete. log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", - log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); + &channel_funding_outpoint.to_channel_id()); break; } @@ -6779,7 +6779,7 @@ where debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() { log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor", - log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); + &channel_funding_outpoint.to_channel_id()); if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, peer_state_lck, peer_state, per_peer_state, chan) { @@ -6792,7 +6792,7 @@ where } } else { log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update", - log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); + &channel_funding_outpoint.to_channel_id()); } } } else { @@ -7090,7 +7090,7 @@ where if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); if channel.context.is_usable() { - log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id())); + log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", &channel.context.channel_id()); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: channel.context.get_counterparty_node_id(), @@ -7098,7 +7098,7 @@ where }); } } else { - log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id())); + log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", &channel.context.channel_id()); } } @@ -7108,7 +7108,7 @@ where } if let Some(announcement_sigs) = announcement_sigs { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id())); + log_trace!(self.logger, "Sending announcement_signatures for channel {}", &channel.context.channel_id()); 
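The `handle_error` hunks above replace the literal `[0; 32]` comparisons with `ChannelId::new_zero()` and `is_zero()`, matching the convention that an all-zero channel id in an `error` message refers to every channel with the peer. A small illustration using the message type changed later in this diff:

    use lightning::ln::ChannelId;
    use lightning::ln::msgs::ErrorMessage;

    // An error that is not scoped to a single channel uses the all-zero id.
    let err = ErrorMessage {
        channel_id: ChannelId::new_zero(),
        data: "internal error, closing all channels".to_owned(),
    };
    assert!(err.channel_id.is_zero());

    // A channel-scoped error carries a real id instead.
    let scoped = ErrorMessage {
        channel_id: ChannelId::from_bytes([4; 32]),
        data: "per-channel error".to_owned(),
    };
    assert!(!scoped.channel_id.is_zero());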
pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: channel.context.get_counterparty_node_id(), msg: announcement_sigs, @@ -7567,7 +7567,7 @@ where // very low priority for the LND team despite being marked "P1". // We're not going to bother handling this in a sensible way, instead simply // repeating the Shutdown message on repeat until morale improves. - if msg.channel_id != [0; 32] { + if !msg.channel_id.is_zero() { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { return; } @@ -7596,8 +7596,8 @@ where _ => {} } - if msg.channel_id == [0; 32] { - let channel_ids: Vec<[u8; 32]> = { + if msg.channel_id.is_zero() { + let channel_ids: Vec = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { return; } @@ -8611,7 +8611,7 @@ where let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); - let mut peer_channels: HashMap>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut peer_channels: HashMap>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); @@ -8631,7 +8631,7 @@ where log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); + &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true); if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -8661,13 +8661,13 @@ where // backwards leg of the HTLC will simply be rejected. 
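As the `to_channel_id()` call sites above show, the id derived from a funding outpoint is now a `ChannelId` as well, so it formats directly without `log_bytes!`. A self-contained sketch with a placeholder txid:

    use bitcoin::hashes::Hash;
    use bitcoin::hash_types::Txid;
    use lightning::chain::transaction::OutPoint;
    use lightning::ln::ChannelId;

    // A dummy funding outpoint; the txid bytes are placeholders.
    let funding_txo = OutPoint { txid: Txid::from_slice(&[7u8; 32]).unwrap(), index: 1 };

    // The helper that derives a funded channel's id from its funding outpoint
    // now hands back the ChannelId newtype, which can be logged directly.
    let channel_id: ChannelId = funding_txo.to_channel_id();
    println!("monitor belongs to channel {}", channel_id);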
log_info!(args.logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", - log_bytes!(channel.context.channel_id()), &payment_hash); + &channel.context.channel_id(), &payment_hash); failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id())); } } } else { log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", - log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(), + &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id()); if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); @@ -8700,7 +8700,7 @@ where channel_capacity_sats: Some(channel.context.get_value_satoshis()), }, None)); } else { - log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id())); + log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id()); log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds."); @@ -8712,7 +8712,7 @@ where for (funding_txo, _) in args.channel_monitors.iter() { if !funding_txo_set.contains(funding_txo) { log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed", - log_bytes!(funding_txo.to_channel_id())); + &funding_txo.to_channel_id()); let monitor_update = ChannelMonitorUpdate { update_id: CLOSED_CHANNEL_UPDATE_ID, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], @@ -8896,7 +8896,7 @@ where $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); for update in $chan_in_flight_upds.iter() { log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", - update.update_id, $channel_info_log, log_bytes!($funding_txo.to_channel_id())); + update.update_id, $channel_info_log, &$funding_txo.to_channel_id()); max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -8944,7 +8944,7 @@ where // If the channel is ahead of the monitor, return InvalidValue: log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", - log_bytes!(chan.context.channel_id()), monitor.get_latest_update_id(), max_in_flight_update_id); + &chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id); log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id()); log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); @@ -8970,7 +8970,7 @@ where } else { log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(args.logger, " The ChannelMonitor for channel {} is missing.", - log_bytes!(funding_txo.to_channel_id())); + &funding_txo.to_channel_id()); log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -9056,7 +9056,7 @@ where if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); false } else { true } } else { true } @@ -9066,7 +9066,7 @@ where pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); pending_events_read.retain(|(event, _)| { if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { intercepted_id != ev_id @@ -9408,6 +9408,7 @@ mod tests { use core::sync::atomic::Ordering; use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; + use crate::ln::ChannelId; use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, ErrorAction}; @@ -9981,7 +9982,7 @@ mod tests { nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel); let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); - let channel_id = &tx.txid().into_inner(); + let channel_id = ChannelId::from_bytes(tx.txid().into_inner()); { // Ensure that the `id_to_peer` map is empty until either party has received the // funding transaction, and have the real `channel_id`. @@ -9995,7 +9996,7 @@ mod tests { // as it has the funding transaction. 
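The test above now builds the funded channel's id with `ChannelId::from_bytes(tx.txid().into_inner())`; the patch also uses `ChannelId::v1_from_funding_txid`, which folds the funding output index into the txid bytes (the BOLT 2 v1 channel id construction), so the two only coincide when the funding output index is 0. A sketch with placeholder bytes:

    use lightning::ln::ChannelId;

    // Placeholder funding txid bytes.
    let funding_txid_bytes = [10u8; 32];

    // For funding output index 0 the v1 channel id equals the txid bytes...
    assert_eq!(
        ChannelId::v1_from_funding_txid(&funding_txid_bytes, 0),
        ChannelId::from_bytes(funding_txid_bytes),
    );
    // ...while a non-zero index changes the low bytes, so the ids differ.
    assert_ne!(
        ChannelId::v1_from_funding_txid(&funding_txid_bytes, 1),
        ChannelId::from_bytes(funding_txid_bytes),
    );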
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); - assert!(nodes_0_lock.contains_key(channel_id)); + assert!(nodes_0_lock.contains_key(&channel_id)); } assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); @@ -10006,7 +10007,7 @@ mod tests { { let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); - assert!(nodes_0_lock.contains_key(channel_id)); + assert!(nodes_0_lock.contains_key(&channel_id)); } expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -10015,7 +10016,7 @@ mod tests { // as it has the funding transaction. let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_1_lock.len(), 1); - assert!(nodes_1_lock.contains_key(channel_id)); + assert!(nodes_1_lock.contains_key(&channel_id)); } check_added_monitors!(nodes[1], 1); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); @@ -10026,7 +10027,7 @@ mod tests { let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update); - nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap(); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown); @@ -10040,7 +10041,7 @@ mod tests { // party's signature for the fee negotiated closing transaction.) let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); - assert!(nodes_0_lock.contains_key(channel_id)); + assert!(nodes_0_lock.contains_key(&channel_id)); } { @@ -10050,7 +10051,7 @@ mod tests { // kept in the `nodes[1]`'s `id_to_peer` map. let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_1_lock.len(), 1); - assert!(nodes_1_lock.contains_key(channel_id)); + assert!(nodes_1_lock.contains_key(&channel_id)); } nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); @@ -10066,7 +10067,7 @@ mod tests { // doesn't have `nodes[0]`'s signature for the closing transaction yet. 
let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_1_lock.len(), 1); - assert!(nodes_1_lock.contains_key(channel_id)); + assert!(nodes_1_lock.contains_key(&channel_id)); } let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); @@ -10117,7 +10118,7 @@ mod tests { let nodes = create_network(2, &node_cfg, &node_chanmgr); // Dummy values - let channel_id = [4; 32]; + let channel_id = ChannelId::from_bytes([4; 32]); let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap()); let intercept_id = InterceptId([0; 32]); @@ -10172,11 +10173,11 @@ mod tests { check_added_monitors!(nodes[0], 1); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); } - open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); } // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected - open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, open_channel_msg.temporary_channel_id); @@ -10227,7 +10228,7 @@ mod tests { for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 { nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]); - open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); } nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg); assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id, @@ -10267,7 +10268,7 @@ mod tests { for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER { nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); } // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be @@ -10319,7 +10320,7 @@ mod tests { _ => panic!("Unexpected event"), } get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk); - open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); } // If we try to accept a channel from another peer non-0conf it will fail. @@ -10535,7 +10536,7 @@ mod tests { // If we provide a channel_id not associated with the peer, we should get an error and no updates // should be applied to ensure update atomicity as specified in the API docs. 
- let bad_channel_id = [10; 32]; + let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10); let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths; let new_fee = current_fee + 100; assert!( diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7f1ded032..293c1da81 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -16,7 +16,7 @@ use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason}; use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource}; -use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; +use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret}; use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate}; use crate::routing::router::{self, PaymentParameters, Route}; @@ -570,11 +570,11 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { } } -pub fn create_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { +pub fn create_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001) } -pub fn create_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { +pub fn create_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { let (channel_ready, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat); let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &channel_ready); (announcement, as_update, bs_update, channel_id, tx) @@ -868,7 +868,7 @@ macro_rules! get_monitor { for index in 0..2 { if let Ok(mon) = $node.chain_monitor.chain_monitor.get_monitor( $crate::chain::transaction::OutPoint { - txid: bitcoin::Txid::from_slice(&$channel_id[..]).unwrap(), index + txid: bitcoin::Txid::from_slice(&$channel_id.0[..]).unwrap(), index }) { monitor = Some(mon); @@ -1005,7 +1005,7 @@ macro_rules! 
reload_node { }; } -pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> ([u8; 32], Transaction, OutPoint) { +pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> (ChannelId, Transaction, OutPoint) { let chan_id = *node.network_chan_count.borrow(); let events = node.node.get_and_clear_pending_events(); @@ -1025,7 +1025,7 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_ _ => panic!("Unexpected event"), } } -pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: [u8; 32]) -> Transaction { +pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId) -> Transaction { let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); @@ -1068,7 +1068,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: & } // Receiver must have been initialized with manually_accept_inbound_channels set to true. -pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option) -> (bitcoin::Transaction, [u8; 32]) { +pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option) -> (bitcoin::Transaction, ChannelId) { let initiator_channels = initiator.node.list_usable_channels().len(); let receiver_channels = receiver.node.list_usable_channels().len(); @@ -1166,7 +1166,7 @@ pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_r node_recv.node.handle_channel_ready(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendChannelReady, node_recv.node.get_our_node_id())); } -pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) { +pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId) { let channel_id; let events_6 = node_conf.node.get_and_clear_pending_msg_events(); assert_eq!(events_6.len(), 3); @@ -1193,7 +1193,7 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv }), channel_id) } -pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) { +pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId) { let conf_height = core::cmp::max(node_a.best_block_info().1 + 1, node_b.best_block_info().1 + 1); create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height); confirm_transaction_at(node_a, tx, conf_height); @@ -1202,7 +1202,7 @@ pub fn 
create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a: create_chan_between_nodes_with_value_confirm_second(node_b, node_a) } -pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32], Transaction) { +pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId, Transaction) { let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat); let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx); (msgs, chan_id, tx) @@ -1242,11 +1242,11 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b, ((*announcement).clone(), as_update, bs_update) } -pub fn create_announced_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { +pub fn create_announced_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001) } -pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { +pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) { let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat); update_nodes_with_chan_announce(nodes, a, b, &chan_announcement.0, &chan_announcement.1, &chan_announcement.2); (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4) @@ -1478,7 +1478,7 @@ macro_rules! check_closed_event { } } -pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) { +pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &ChannelId, funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) { let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) }; let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) }; let (tx_a, tx_b); @@ -2766,7 +2766,7 @@ pub enum HTLCType { NONE, TIMEOUT, SUCCESS } /// /// All broadcast transactions must be accounted for in one of the above three types of we'll /// also fail. 
-pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option, has_htlc_tx: HTLCType) -> Vec { +pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option, has_htlc_tx: HTLCType) -> Vec { let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap(); let mut txn_seen = HashSet::new(); node_txn.retain(|tx| txn_seen.insert(tx.txid())); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2fbc36ce9..d1b2db843 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -19,7 +19,7 @@ use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCK use crate::chain::transaction::OutPoint; use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; -use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; +use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; @@ -1505,7 +1505,7 @@ fn test_fee_spike_violation_fails_htlc() { _ => panic!("Unexpected event"), }; nodes[1].logger.assert_log("lightning::ln::channel".to_string(), - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1); + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); check_added_monitors!(nodes[1], 2); } @@ -5835,7 +5835,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // us to surface its failure to the user. chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1); // Check that the payment failed to be sent out. let events = nodes[0].node.get_and_clear_pending_events(); @@ -5923,7 +5923,7 @@ fn test_free_and_fail_holding_cell_htlcs() { // to surface its failure to the user. The first payment should succeed. 
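The log-assertion changes above rely on `ChannelId`'s `Display` implementation producing the same lowercase-hex rendering that `hex::encode` gave for the raw array, so the id can be interpolated straight into format strings. For example:

    use lightning::ln::ChannelId;

    let channel_id = ChannelId::from_bytes([0xab; 32]);
    // Display renders the 32 bytes as lowercase hex, so this ends with
    // "abab...ab", just as hex::encode produced for the raw array.
    let line = format!("Freeing holding cell with 1 HTLC updates in channel {}", channel_id);
    assert!(line.ends_with(&"ab".repeat(32)));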
chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1); // Check that the second payment failed to be sent out. let events = nodes[0].node.get_and_clear_pending_events(); @@ -8003,7 +8003,7 @@ fn test_can_not_accept_unknown_inbound_channel() { let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); let nodes = create_network(2, &node_cfg, &node_chanmgr); - let unknown_channel_id = [0; 32]; + let unknown_channel_id = ChannelId::new_zero(); let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0); match api_res { Err(APIError::APIMisuseError { err }) => { @@ -9017,7 +9017,7 @@ fn test_error_chans_closed() { // A null channel ID should close all channels let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() }); + nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 2); check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }, [nodes[1].node.get_our_node_id(); 2], 100000); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index dc8d92154..89ded2168 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -31,6 +31,7 @@ use bitcoin::{secp256k1, Witness}; use bitcoin::blockdata::script::Script; use bitcoin::hash_types::{Txid, BlockHash}; +use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; use crate::ln::onion_utils; use crate::onion_message; @@ -45,8 +46,6 @@ use crate::events::{MessageSendEventsProvider, OnionMessageProvider}; use crate::util::logger; use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize}; -use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; - use crate::routing::gossip::{NodeAlias, NodeId}; /// 21 million * 10^8 * 1000 @@ -111,7 +110,7 @@ pub struct ErrorMessage { /// /// All-0s indicates a general error unrelated to a specific channel, after which all channels /// with the sending peer should be closed. - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// A possibly human-readable error description. /// /// The string should be sanitized before it is used (e.g., emitted to logs or printed to @@ -128,7 +127,7 @@ pub struct WarningMessage { /// The channel ID involved in the warning. /// /// All-0s indicates a warning unrelated to a specific channel. - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// A possibly human-readable warning description. /// /// The string should be sanitized before it is used (e.g. 
emitted to logs or printed to @@ -171,7 +170,7 @@ pub struct OpenChannel { /// The genesis hash of the blockchain where the channel is to be opened pub chain_hash: BlockHash, /// A temporary channel ID, until the funding outpoint is announced - pub temporary_channel_id: [u8; 32], + pub temporary_channel_id: ChannelId, /// The channel value pub funding_satoshis: u64, /// The amount to push to the counterparty as part of the open, in milli-satoshi @@ -225,7 +224,7 @@ pub struct OpenChannelV2 { /// The genesis hash of the blockchain where the channel is to be opened pub chain_hash: BlockHash, /// A temporary channel ID derived using a zeroed out value for the channel acceptor's revocation basepoint - pub temporary_channel_id: [u8; 32], + pub temporary_channel_id: ChannelId, /// The feerate for the funding transaction set by the channel initiator pub funding_feerate_sat_per_1000_weight: u32, /// The feerate for the commitment transaction set by the channel initiator @@ -282,7 +281,7 @@ pub struct OpenChannelV2 { #[derive(Clone, Debug, PartialEq, Eq)] pub struct AcceptChannel { /// A temporary channel ID, until the funding outpoint is announced - pub temporary_channel_id: [u8; 32], + pub temporary_channel_id: ChannelId, /// The threshold below which outputs on transactions broadcast by sender will be omitted pub dust_limit_satoshis: u64, /// The maximum inbound HTLC value in flight towards sender, in milli-satoshi @@ -330,7 +329,7 @@ pub struct AcceptChannel { #[derive(Clone, Debug, PartialEq, Eq)] pub struct AcceptChannelV2 { /// The same `temporary_channel_id` received from the initiator's `open_channel2` message. - pub temporary_channel_id: [u8; 32], + pub temporary_channel_id: ChannelId, /// Part of the channel value contributed by the channel acceptor pub funding_satoshis: u64, /// The threshold below which outputs on transactions broadcast by the channel acceptor will be @@ -383,7 +382,7 @@ pub struct AcceptChannelV2 { #[derive(Clone, Debug, PartialEq, Eq)] pub struct FundingCreated { /// A temporary channel ID, until the funding is established - pub temporary_channel_id: [u8; 32], + pub temporary_channel_id: ChannelId, /// The funding transaction ID pub funding_txid: Txid, /// The specific output index funding this channel @@ -406,7 +405,7 @@ pub struct FundingCreated { #[derive(Clone, Debug, PartialEq, Eq)] pub struct FundingSigned { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The signature of the channel acceptor (fundee) on the initial commitment transaction pub signature: Signature, #[cfg(taproot)] @@ -420,7 +419,7 @@ pub struct FundingSigned { #[derive(Clone, Debug, PartialEq, Eq)] pub struct ChannelReady { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The per-commitment point of the second commitment transaction pub next_per_commitment_point: PublicKey, /// If set, provides a `short_channel_id` alias for this channel. @@ -436,7 +435,7 @@ pub struct ChannelReady { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxAddInput { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// A randomly chosen unique identifier for this input, which is even for initiators and odd for /// non-initiators. 
pub serial_id: u64, @@ -455,7 +454,7 @@ pub struct TxAddInput { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxAddOutput { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// A randomly chosen unique identifier for this output, which is even for initiators and odd for /// non-initiators. pub serial_id: u64, @@ -471,7 +470,7 @@ pub struct TxAddOutput { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxRemoveInput { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The serial ID of the input to be removed pub serial_id: u64, } @@ -482,7 +481,7 @@ pub struct TxRemoveInput { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxRemoveOutput { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The serial ID of the output to be removed pub serial_id: u64, } @@ -494,7 +493,7 @@ pub struct TxRemoveOutput { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxComplete { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, } /// A tx_signatures message containing the sender's signatures for a transaction constructed with @@ -504,7 +503,7 @@ pub struct TxComplete { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxSignatures { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The TXID pub tx_hash: Txid, /// The list of witnesses @@ -518,7 +517,7 @@ pub struct TxSignatures { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxInitRbf { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The locktime of the transaction pub locktime: u32, /// The feerate of the transaction @@ -535,7 +534,7 @@ pub struct TxInitRbf { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxAckRbf { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The number of satoshis the sender will contribute to or, if negative, remove from /// (e.g. splice-out) the funding output of the transaction pub funding_output_contribution: Option, @@ -547,7 +546,7 @@ pub struct TxAckRbf { #[derive(Clone, Debug, PartialEq, Eq)] pub struct TxAbort { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// Message data pub data: Vec, } @@ -558,7 +557,7 @@ pub struct TxAbort { #[derive(Clone, Debug, PartialEq, Eq)] pub struct Shutdown { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The destination of this peer's funds on closing. /// /// Must be in one of these forms: P2PKH, P2SH, P2WPKH, P2WSH, P2TR. 
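For readers skimming the msgs.rs hunks above and below: the patch only relies on a handful of ChannelId operations -- ChannelId::new_zero() for the all-zeros "no specific channel" value, ChannelId::from_bytes(..) for fixed test vectors, is_zero() for comparisons, and a Display impl so log call sites can pass &channel_id instead of log_bytes!(..). A minimal sketch of such a newtype, assuming a plain wrapper over the old [u8; 32] with lower-case hex Display (the real definition is not included in this diff and may differ), is:

// Illustrative only; not the actual lightning::ln::ChannelId definition.
use core::fmt;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct ChannelId(pub [u8; 32]);

impl ChannelId {
    /// Wraps raw bytes, e.g. the fixed [2; 32] test vectors used in these tests.
    pub fn from_bytes(data: [u8; 32]) -> Self { Self(data) }
    /// The all-zero ID used in error/warning messages not tied to a channel.
    pub fn new_zero() -> Self { Self([0; 32]) }
    /// Replaces the old `channel_id == [0; 32]` comparisons.
    pub fn is_zero(&self) -> bool { self.0.iter().all(|&b| b == 0) }
}

impl fmt::Display for ChannelId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Lower-case hex, matching what log_bytes!() previously produced.
        for b in self.0.iter() {
            write!(f, "{:02x}", b)?;
        }
        Ok(())
    }
}

Keeping the wrapper Copy + Eq + Hash preserves the ergonomics of the old byte array as a map key while giving formatting and future derivations a single home.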
@@ -585,7 +584,7 @@ pub struct ClosingSignedFeeRange { #[derive(Clone, Debug, PartialEq, Eq)] pub struct ClosingSigned { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The proposed total fee for the closing transaction pub fee_satoshis: u64, /// A signature on the closing transaction @@ -601,7 +600,7 @@ pub struct ClosingSigned { #[derive(Clone, Debug, PartialEq, Eq)] pub struct UpdateAddHTLC { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The HTLC ID pub htlc_id: u64, /// The HTLC value in milli-satoshi @@ -634,7 +633,7 @@ pub struct OnionMessage { #[derive(Clone, Debug, PartialEq, Eq)] pub struct UpdateFulfillHTLC { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The HTLC ID pub htlc_id: u64, /// The pre-image of the payment hash, allowing HTLC redemption @@ -647,7 +646,7 @@ pub struct UpdateFulfillHTLC { #[derive(Clone, Debug, PartialEq, Eq)] pub struct UpdateFailHTLC { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The HTLC ID pub htlc_id: u64, pub(crate) reason: OnionErrorPacket, @@ -659,7 +658,7 @@ pub struct UpdateFailHTLC { #[derive(Clone, Debug, PartialEq, Eq)] pub struct UpdateFailMalformedHTLC { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The HTLC ID pub htlc_id: u64, pub(crate) sha256_of_onion: [u8; 32], @@ -673,7 +672,7 @@ pub struct UpdateFailMalformedHTLC { #[derive(Clone, Debug, PartialEq, Eq)] pub struct CommitmentSigned { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// A signature on the commitment transaction pub signature: Signature, /// Signatures on the HTLC transactions @@ -689,7 +688,7 @@ pub struct CommitmentSigned { #[derive(Clone, Debug, PartialEq, Eq)] pub struct RevokeAndACK { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The secret corresponding to the per-commitment point pub per_commitment_secret: [u8; 32], /// The next sender-broadcast commitment transaction's per-commitment point @@ -705,7 +704,7 @@ pub struct RevokeAndACK { #[derive(Clone, Debug, PartialEq, Eq)] pub struct UpdateFee { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// Fee rate per 1000-weight of the transaction pub feerate_per_kw: u32, } @@ -716,7 +715,7 @@ pub struct UpdateFee { #[derive(Clone, Debug, PartialEq, Eq)] pub struct ChannelReestablish { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The next commitment number for the sender pub next_local_commitment_number: u64, /// The next commitment number for the recipient @@ -736,7 +735,7 @@ pub struct ChannelReestablish { #[derive(Clone, Debug, PartialEq, Eq)] pub struct AnnouncementSignatures { /// The channel ID - pub channel_id: [u8; 32], + pub channel_id: ChannelId, /// The short channel ID pub short_channel_id: u64, /// A signature by the node key @@ -2476,6 +2475,7 @@ mod tests { use bitcoin::{Transaction, PackedLockTime, TxIn, Script, Sequence, Witness, TxOut}; use hex; use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; + use crate::ln::ChannelId; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket}; use crate::routing::gossip::{NodeAlias, NodeId}; @@ -2506,7 +2506,7 @@ mod tests { }; let cr = msgs::ChannelReestablish { - channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 
0], + channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]), next_local_commitment_number: 3, next_remote_commitment_number: 4, your_last_per_commitment_secret: [9;32], @@ -2535,7 +2535,7 @@ mod tests { }; let cr = msgs::ChannelReestablish { - channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0], + channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]), next_local_commitment_number: 3, next_remote_commitment_number: 4, your_last_per_commitment_secret: [9;32], @@ -2587,7 +2587,7 @@ mod tests { let sig_1 = get_sig_on!(privkey, secp_ctx, String::from("01010101010101010101010101010101")); let sig_2 = get_sig_on!(privkey, secp_ctx, String::from("02020202020202020202020202020202")); let announcement_signatures = msgs::AnnouncementSignatures { - channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0], + channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]), short_channel_id: 2316138423780173, node_signature: sig_1, bitcoin_signature: sig_2, @@ -2823,7 +2823,7 @@ mod tests { let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx); let open_channel = msgs::OpenChannel { chain_hash: BlockHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap(), - temporary_channel_id: [2; 32], + temporary_channel_id: ChannelId::from_bytes([2; 32]), funding_satoshis: 1311768467284833366, push_msat: 2536655962884945560, dust_limit_satoshis: 3608586615801332854, @@ -2884,7 +2884,7 @@ mod tests { let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx); let open_channelv2 = msgs::OpenChannelV2 { chain_hash: BlockHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap(), - temporary_channel_id: [2; 32], + temporary_channel_id: ChannelId::from_bytes([2; 32]), funding_feerate_sat_per_1000_weight: 821716, commitment_feerate_sat_per_1000_weight: 821716, funding_satoshis: 1311768467284833366, @@ -2974,7 +2974,7 @@ mod tests { let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx); let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx); let accept_channel = msgs::AcceptChannel { - temporary_channel_id: [2; 32], + temporary_channel_id: ChannelId::from_bytes([2; 32]), dust_limit_satoshis: 1311768467284833366, max_htlc_value_in_flight_msat: 2536655962884945560, channel_reserve_satoshis: 3608586615801332854, @@ -3017,7 +3017,7 @@ mod tests { let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx); let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx); let accept_channelv2 = msgs::AcceptChannelV2 { - temporary_channel_id: [2; 32], + temporary_channel_id: ChannelId::from_bytes([2; 32]), funding_satoshis: 1311768467284833366, dust_limit_satoshis: 1311768467284833366, max_htlc_value_in_flight_msat: 2536655962884945560, @@ -3071,7 +3071,7 @@ mod tests { let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx); let sig_1 = 
get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101")); let funding_created = msgs::FundingCreated { - temporary_channel_id: [2; 32], + temporary_channel_id: ChannelId::from_bytes([2; 32]), funding_txid: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(), funding_output_index: 255, signature: sig_1, @@ -3091,7 +3091,7 @@ mod tests { let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx); let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101")); let funding_signed = msgs::FundingSigned { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), signature: sig_1, #[cfg(taproot)] partial_signature_with_nonce: None, @@ -3106,7 +3106,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let (_, pubkey_1,) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx); let channel_ready = msgs::ChannelReady { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), next_per_commitment_point: pubkey_1, short_channel_id_alias: None, }; @@ -3118,7 +3118,7 @@ mod tests { #[test] fn encoding_tx_add_input() { let tx_add_input = msgs::TxAddInput { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), serial_id: 4886718345, prevtx: TransactionU16LenLimited::new(Transaction { version: 2, @@ -3153,7 +3153,7 @@ mod tests { #[test] fn encoding_tx_add_output() { let tx_add_output = msgs::TxAddOutput { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), serial_id: 4886718345, sats: 4886718345, script: Address::from_str("bc1qxmk834g5marzm227dgqvynd23y2nvt2ztwcw2z").unwrap().script_pubkey(), @@ -3166,7 +3166,7 @@ mod tests { #[test] fn encoding_tx_remove_input() { let tx_remove_input = msgs::TxRemoveInput { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), serial_id: 4886718345, }; let encoded_value = tx_remove_input.encode(); @@ -3177,7 +3177,7 @@ mod tests { #[test] fn encoding_tx_remove_output() { let tx_remove_output = msgs::TxRemoveOutput { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), serial_id: 4886718345, }; let encoded_value = tx_remove_output.encode(); @@ -3188,7 +3188,7 @@ mod tests { #[test] fn encoding_tx_complete() { let tx_complete = msgs::TxComplete { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), }; let encoded_value = tx_complete.encode(); let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap(); @@ -3198,7 +3198,7 @@ mod tests { #[test] fn encoding_tx_signatures() { let tx_signatures = msgs::TxSignatures { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), tx_hash: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(), witnesses: vec![ Witness::from_vec(vec![ @@ -3232,7 +3232,7 @@ mod tests { fn do_encoding_tx_init_rbf(funding_value_with_hex_target: Option<(i64, &str)>) { let tx_init_rbf = msgs::TxInitRbf { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), locktime: 305419896, feerate_sat_per_1000_weight: 20190119, funding_output_contribution: if let Some((value, _)) = funding_value_with_hex_target { Some(value) } else { None }, @@ -3258,7 +3258,7 @@ mod tests { fn do_encoding_tx_ack_rbf(funding_value_with_hex_target: Option<(i64, &str)>) { let tx_ack_rbf = msgs::TxAckRbf { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), 
funding_output_contribution: if let Some((value, _)) = funding_value_with_hex_target { Some(value) } else { None }, }; let encoded_value = tx_ack_rbf.encode(); @@ -3281,7 +3281,7 @@ mod tests { #[test] fn encoding_tx_abort() { let tx_abort = msgs::TxAbort { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), data: hex::decode("54686520717569636B2062726F776E20666F78206A756D7073206F76657220746865206C617A7920646F672E").unwrap(), }; let encoded_value = tx_abort.encode(); @@ -3294,7 +3294,7 @@ mod tests { let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx); let script = Builder::new().push_opcode(opcodes::OP_TRUE).into_script(); let shutdown = msgs::Shutdown { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), scriptpubkey: if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() } else if script_type == 2 { Address::p2sh(&script, Network::Testnet).unwrap().script_pubkey() } @@ -3329,7 +3329,7 @@ mod tests { let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx); let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101")); let closing_signed = msgs::ClosingSigned { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), fee_satoshis: 2316138423780173, signature: sig_1, fee_range: None, @@ -3340,7 +3340,7 @@ mod tests { assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value)).unwrap(), closing_signed); let closing_signed_with_range = msgs::ClosingSigned { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), fee_satoshis: 2316138423780173, signature: sig_1, fee_range: Some(msgs::ClosingSignedFeeRange { @@ -3366,7 +3366,7 @@ mod tests { hmac: [2; 32] }; let update_add_htlc = msgs::UpdateAddHTLC { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, amount_msat: 3608586615801332854, payment_hash: PaymentHash([1; 32]), @@ -3382,7 +3382,7 @@ mod tests { #[test] fn encoding_update_fulfill_htlc() { let update_fulfill_htlc = msgs::UpdateFulfillHTLC { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, payment_preimage: PaymentPreimage([1; 32]), }; @@ -3397,7 +3397,7 @@ mod tests { data: [1; 32].to_vec(), }; let update_fail_htlc = msgs::UpdateFailHTLC { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, reason }; @@ -3409,7 +3409,7 @@ mod tests { #[test] fn encoding_update_fail_malformed_htlc() { let update_fail_malformed_htlc = msgs::UpdateFailMalformedHTLC { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, sha256_of_onion: [1; 32], failure_code: 255 @@ -3430,7 +3430,7 @@ mod tests { let sig_3 = get_sig_on!(privkey_3, secp_ctx, String::from("01010101010101010101010101010101")); let sig_4 = get_sig_on!(privkey_4, secp_ctx, String::from("01010101010101010101010101010101")); let commitment_signed = msgs::CommitmentSigned { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), signature: sig_1, htlc_signatures: if htlcs { vec![sig_2, sig_3, sig_4] } else { Vec::new() }, #[cfg(taproot)] @@ -3457,7 +3457,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx); let raa = msgs::RevokeAndACK { - channel_id: [2; 32], + 
channel_id: ChannelId::from_bytes([2; 32]), per_commitment_secret: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], next_per_commitment_point: pubkey_1, #[cfg(taproot)] @@ -3471,7 +3471,7 @@ mod tests { #[test] fn encoding_update_fee() { let update_fee = msgs::UpdateFee { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), feerate_per_kw: 20190119, }; let encoded_value = update_fee.encode(); @@ -3518,7 +3518,7 @@ mod tests { #[test] fn encoding_error() { let error = msgs::ErrorMessage { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), data: String::from("rust-lightning"), }; let encoded_value = error.encode(); @@ -3529,7 +3529,7 @@ mod tests { #[test] fn encoding_warning() { let error = msgs::WarningMessage { - channel_id: [2; 32], + channel_id: ChannelId::from_bytes([2; 32]), data: String::from("rust-lightning"), }; let encoded_value = error.encode(); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index c26628571..d3b1601f8 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -16,10 +16,10 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATE use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose}; -use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; +use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS}; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::ln::features::Bolt11InvoiceFeatures; -use crate::ln::{msgs, PaymentSecret, PaymentPreimage}; +use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage}; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::outbound_payment::Retry; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; @@ -1703,7 +1703,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { }; // Check for unknown channel id error. - let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); + let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable { err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.", log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) }); @@ -1732,7 +1732,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); assert_eq!(unusable_chan_err , APIError::ChannelUnavailable { err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. 
Channel may still be opening.", - log_bytes!(temp_chan_id), nodes[2].node.get_our_node_id()) }); + &temp_chan_id, nodes[2].node.get_our_node_id()) }); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); // Open the just-in-time channel so the payment can then be forwarded. diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 0e4f4a13d..7565246fe 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -20,6 +20,7 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey}; use crate::sign::{KeysManager, NodeSigner, Recipient}; use crate::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider}; +use crate::ln::ChannelId; use crate::ln::features::{InitFeatures, NodeFeatures}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler}; @@ -186,7 +187,7 @@ impl ErroringMessageHandler { pub fn new() -> Self { Self { message_queue: Mutex::new(Vec::new()) } } - fn push_error(&self, node_id: &PublicKey, channel_id: [u8; 32]) { + fn push_error(&self, node_id: &PublicKey, channel_id: ChannelId) { self.message_queue.lock().unwrap().push(MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, data: "We do not support channel messages, sorry.".to_owned() }, @@ -1420,13 +1421,13 @@ impl { log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); - self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() }); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() }); continue; } (_, Some(ty)) if is_gossip_msg(ty) => { log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message"); self.enqueue_message(peer, &msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Unreadable/bogus gossip message of type {}", ty), }); continue; @@ -1592,7 +1593,7 @@ impl { log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data)); self.message_handler.chan_handler.handle_error(&their_node_id, &msg); - if msg.channel_id == [0; 32] { + if msg.channel_id.is_zero() { return Err(PeerHandleError { }.into()); } }, @@ -1913,31 +1914,31 @@ impl { log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.temporary_channel_id)); + &msg.temporary_channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.temporary_channel_id)); + &msg.temporary_channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.temporary_channel_id)); + &msg.temporary_channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => { log_debug!(self.logger, 
"Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.temporary_channel_id)); + &msg.temporary_channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", log_pubkey!(node_id), - log_bytes!(msg.temporary_channel_id), + &msg.temporary_channel_id, log_funding_channel_id!(msg.funding_txid, msg.funding_output_index)); // TODO: If the peer is gone we should generate a DiscardFunding event // indicating to the wallet that they should just throw away this funding transaction @@ -1946,73 +1947,73 @@ impl { log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendChannelReady { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxAckRbf { ref node_id, 
ref msg } => { log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { @@ -2021,7 +2022,7 @@ impl { log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendShutdown { ref node_id, ref msg } => { log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", log_pubkey!(node_id), - log_bytes!(msg.channel_id)); + &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => { @@ -2482,6 +2483,7 @@ mod tests { use crate::sign::{NodeSigner, Recipient}; use crate::events; use crate::io; + use crate::ln::ChannelId; use crate::ln::features::{InitFeatures, NodeFeatures}; use crate::ln::peer_channel_encryptor::PeerChannelEncryptor; use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses}; @@ -2721,7 +2723,7 @@ mod tests { .push(crate::events::MessageSendEvent::SendShutdown { node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(), msg: msgs::Shutdown { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), scriptpubkey: bitcoin::Script::new(), }, }); @@ -2729,7 +2731,7 @@ mod tests { .push(crate::events::MessageSendEvent::SendShutdown { node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(), msg: msgs::Shutdown { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), scriptpubkey: bitcoin::Script::new(), }, }); @@ -2858,7 +2860,7 @@ mod tests { let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap(); - let msg = msgs::Shutdown { channel_id: [42; 32], 
scriptpubkey: bitcoin::Script::new() }; + let msg = msgs::Shutdown { channel_id: ChannelId::from_bytes([42; 32]), scriptpubkey: bitcoin::Script::new() }; a_chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::SendShutdown { node_id: their_id, msg: msg.clone() }); diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index bb5531242..64f3585b9 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -23,6 +23,7 @@ use bitcoin::network::constants::Network; use bitcoin::blockdata::constants::genesis_block; use crate::events::{MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::ChannelId; use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter}; @@ -382,7 +383,7 @@ macro_rules! secp_verify_sig { err: format!("Invalid signature on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid signature on {} message", $msg_type), }, log_level: Level::Trace, @@ -400,7 +401,7 @@ macro_rules! get_pubkey_from_node_id { err: format!("Invalid public key on {} message", $msg_type), action: ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), data: format!("Invalid public key on {} message", $msg_type), }, log_level: Level::Trace diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 1f143ed93..f9d8b014b 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2683,6 +2683,7 @@ mod tests { use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel}; use crate::chain::transaction::OutPoint; use crate::sign::EntropySource; + use crate::ln::ChannelId; use crate::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures, ChannelFeatures, InitFeatures, NodeFeatures}; use crate::ln::msgs::{ErrorAction, LightningError, UnsignedChannelUpdate, MAX_VALUE_MSAT}; use crate::ln::channelmanager; @@ -2715,7 +2716,7 @@ mod tests { fn get_channel_details(short_channel_id: Option, node_id: PublicKey, features: InitFeatures, outbound_capacity_msat: u64) -> channelmanager::ChannelDetails { channelmanager::ChannelDetails { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), counterparty: channelmanager::ChannelCounterparty { features, node_id, @@ -6881,6 +6882,7 @@ pub(crate) mod bench_utils { use crate::chain::transaction::OutPoint; use crate::sign::{EntropySource, KeysManager}; + use crate::ln::ChannelId; use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails}; use crate::ln::features::Bolt11InvoiceFeatures; use crate::routing::gossip::NetworkGraph; @@ -6934,7 +6936,7 @@ pub(crate) mod bench_utils { #[inline] pub(crate) fn first_hop(node_id: PublicKey) -> ChannelDetails { ChannelDetails { - channel_id: [0; 32], + channel_id: ChannelId::new_zero(), counterparty: ChannelCounterparty { features: channelmanager::provided_init_features(&UserConfig::default()), node_id, diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index e79980370..4836b4d68 100644 --- a/lightning/src/util/macro_logger.rs +++ 
b/lightning/src/util/macro_logger.rs @@ -15,7 +15,6 @@ use bitcoin::blockdata::transaction::Transaction; use crate::routing::router::Route; use crate::ln::chan_utils::HTLCClaim; -use crate::util::logger::DebugBytes; macro_rules! log_iter { ($obj: expr) => { @@ -42,10 +41,7 @@ macro_rules! log_bytes { pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16); impl<'a> core::fmt::Display for DebugFundingChannelId<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - for i in (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().iter() { - write!(f, "{:02x}", i)?; - } - Ok(()) + (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f) } } macro_rules! log_funding_channel_id { @@ -57,7 +53,7 @@ macro_rules! log_funding_channel_id { pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T)); impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - DebugBytes(&(self.0).0.to_channel_id()[..]).fmt(f) + (self.0).0.to_channel_id().fmt(f) } } macro_rules! log_funding_info { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index bcd460ee1..1eef3e12b 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -20,6 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::sign; use crate::events; use crate::events::bump_transaction::{WalletSource, Utxo}; +use crate::ln::ChannelId; use crate::ln::channelmanager; use crate::ln::chan_utils::CommitmentTransaction; use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; @@ -196,14 +197,14 @@ impl SignerProvider for OnlyReadsKeysInterface { pub struct TestChainMonitor<'a> { pub added_monitors: Mutex)>>, - pub monitor_updates: Mutex>>, - pub latest_monitor_update_id: Mutex>, + pub monitor_updates: Mutex>>, + pub latest_monitor_update_id: Mutex>, pub chain_monitor: chainmonitor::ChainMonitor>, pub keys_manager: &'a TestKeysInterface, /// If this is set to Some(), the next update_channel call (not watch_channel) must be a /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given /// boolean. - pub expect_channel_force_closed: Mutex>, + pub expect_channel_force_closed: Mutex>, } impl<'a> TestChainMonitor<'a> { pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist, keys_manager: &'a TestKeysInterface) -> Self { @@ -217,7 +218,7 @@ impl<'a> TestChainMonitor<'a> { } } - pub fn complete_sole_pending_chan_update(&self, channel_id: &[u8; 32]) { + pub fn complete_sole_pending_chan_update(&self, channel_id: &ChannelId) { let (outpoint, _, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone(); self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap(); }
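The macro_logger.rs hunks above drop their hand-rolled hex loops because OutPoint::to_channel_id() now yields a ChannelId whose Display impl does the formatting, and test_utils.rs switches its map keys and helper signatures from [u8; 32] to ChannelId. As a hedged sketch of the v1 channel_id derivation those call sites depend on (BOLT 2: the funding txid with the funding output index XORed into its last two bytes; the exact byte order is an assumption here, not taken from this diff), reusing the illustrative ChannelId sketched earlier:

// Hedged sketch; the real OutPoint::to_channel_id() may order bytes differently.
fn channel_id_from_funding(funding_txid_bytes: [u8; 32], funding_output_index: u16) -> ChannelId {
    let mut bytes = funding_txid_bytes;
    // XOR the big-endian output index into the final two bytes of the txid.
    bytes[30] ^= (funding_output_index >> 8) as u8;
    bytes[31] ^= (funding_output_index & 0xff) as u8;
    ChannelId::from_bytes(bytes)
}

fn main() {
    let id = channel_id_from_funding([0x11; 32], 255);
    // With Display available, logging no longer needs log_bytes!():
    println!("channel {} closed by funding output spend", id);
}

With the Display impl in one place, the DebugFundingChannelId and DebugFundingInfo wrappers in macro_logger.rs can simply delegate to it, as the diff above shows.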