X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=82f48c3a73b59d05a208130d83d8c1e9863fecdf;hb=2223e92ac6b1ef0b11ac7448ed35f5d0adf77aaa;hp=62629fc548679dfa56b9a58ae711704942657e06;hpb=fac5373687a4c7919c8639744dc712d922082cc3;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 62629fc5..82f48c3a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9,7 +9,7 @@ //! The top-level channel management and payment tracking stuff lives here. //! -//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is +//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is //! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those //! upon reconnect to the relevant peer(s). //! @@ -53,12 +53,13 @@ use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VA use crate::ln::outbound_payment; use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment}; use crate::ln::wire::Encode; -use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner}; +use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner}; use crate::util::config::{UserConfig, ChannelConfig}; use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::util::events; use crate::util::wakers::{Future, Notifier}; use crate::util::scid_utils::fake_scid; +use crate::util::string::UntrustedString; use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; use crate::util::logger::{Level, Logger}; use crate::util::errors::APIError; @@ -198,7 +199,8 @@ struct ClaimableHTLC { } /// A payment identifier used to uniquely identify a payment to LDK. -/// (C-not exported) as we just use [u8; 32] directly +/// +/// This is not exported to bindings users as we just use [u8; 32] directly #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct PaymentId(pub [u8; 32]); @@ -216,7 +218,8 @@ impl Readable for PaymentId { } /// An identifier used to uniquely identify an intercepted HTLC to LDK. -/// (C-not exported) as we just use [u8; 32] directly +/// +/// This is not exported to bindings users as we just use [u8; 32] directly #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct InterceptId(pub [u8; 32]); @@ -275,11 +278,6 @@ pub(crate) enum HTLCSource { first_hop_htlc_msat: u64, payment_id: PaymentId, payment_secret: Option, - /// Note that this is now "deprecated" - we write it for forwards (and read it for - /// backwards) compatibility reasons, but prefer to use the data in the - /// [`super::outbound_payment`] module, which stores per-payment data once instead of in - /// each HTLC. 
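The comment being removed above explains why `payment_params` is leaving `HTLCSource::OutboundRoute`: payment-level routing data is now kept once per payment in the `outbound_payment` module rather than duplicated into every in-flight HTLC. A minimal standalone sketch of that layout, using illustrative stand-in types (not LDK's real definitions):

```rust
use std::collections::HashMap;

// Illustrative stand-ins for this sketch only; these are not LDK's definitions.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct PaymentId([u8; 32]);

#[derive(Clone)]
struct PaymentParams { max_total_cltv_expiry_delta: u32 }

// Payment-level data kept once per payment, as the outbound_payment module does.
struct PendingPayment { params: PaymentParams }

// Each in-flight HTLC carries only the id of the payment it belongs to.
struct OutboundHtlc { payment_id: PaymentId, first_hop_htlc_msat: u64 }

struct OutboundPayments { pending: HashMap<PaymentId, PendingPayment> }

impl OutboundPayments {
    // Look the parameters up by PaymentId instead of reading a copy stored in the HTLC.
    fn params_for_htlc(&self, htlc: &OutboundHtlc) -> Option<&PaymentParams> {
        self.pending.get(&htlc.payment_id).map(|p| &p.params)
    }
}
```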
- payment_params: Option, }, } #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash @@ -290,14 +288,13 @@ impl core::hash::Hash for HTLCSource { 0u8.hash(hasher); prev_hop_data.hash(hasher); }, - HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => { + HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat } => { 1u8.hash(hasher); path.hash(hasher); session_priv[..].hash(hasher); payment_id.hash(hasher); payment_secret.hash(hasher); first_hop_htlc_msat.hash(hasher); - payment_params.hash(hasher); }, } } @@ -312,7 +309,6 @@ impl HTLCSource { first_hop_htlc_msat: 0, payment_id: PaymentId([2; 32]), payment_secret: None, - payment_params: None, } } } @@ -566,15 +562,16 @@ struct PendingInboundPayment { min_value_msat: Option, } -/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g. -/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static +/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g. +/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case -/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents -/// issues such as overly long function definitions. Note that the ChannelManager can take any type -/// that implements KeysInterface or Router for its keys manager and router, respectively, but this -/// type alias chooses the concrete types of KeysManager and DefaultRouter. +/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents +/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type +/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager, +/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types +/// of [`KeysManager`] and [`DefaultRouter`]. /// -/// (C-not exported) as Arcs don't make sense in bindings +/// This is not exported to bindings users as Arcs don't make sense in bindings pub type SimpleArcChannelManager = ChannelManager< Arc, Arc, @@ -590,50 +587,49 @@ pub type SimpleArcChannelManager = ChannelManager< Arc >; -/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference -/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't +/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference +/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't /// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as -/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes). +/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes). /// But if this is not necessary, using a reference is more efficient. Defining these type aliases /// issues such as overly long function definitions. 
Note that the ChannelManager can take any type -/// that implements KeysInterface or Router for its keys manager and router, respectively, but this -/// type alias chooses the concrete types of KeysManager and DefaultRouter. +/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager, +/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types +/// of [`KeysManager`] and [`DefaultRouter`]. /// -/// (C-not exported) as Arcs don't make sense in bindings +/// This is not exported to bindings users as Arcs don't make sense in bindings pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex, &'g L>>>, &'g L>; /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. /// -/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through +/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through /// to individual Channels. /// -/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for +/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for /// all peers during write/read (though does not modify this instance, only the instance being -/// serialized). This will result in any channels which have not yet exchanged funding_created (ie -/// called funding_transaction_generated for outbound channels). +/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e., +/// called [`funding_transaction_generated`] for outbound channels) being closed. /// -/// Note that you can be a bit lazier about writing out ChannelManager than you can be with -/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before -/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates -/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during +/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with +/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before +/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates +/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during /// the serialization process). If the deserialized version is out-of-date compared to the -/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the -/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees). +/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the +/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees). /// -/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which -/// tells you the last block hash which was block_connect()ed. 
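The removed paragraph above describes the old post-deserialization requirement: replay the "reorg path" by calling `block_disconnected` back to a common ancestor and then `block_connected` forward to the best block (now covered by the `chain::Listen`/`chain::Confirm` docs instead). A standalone sketch of that replay loop against a toy listener, purely to make the described flow concrete:

```rust
// Toy stand-ins purely to illustrate the replay described above; the real
// interfaces are `lightning::chain::Listen` / `lightning::chain::Confirm`.
#[derive(Clone, Copy)]
struct BlockHash([u8; 32]);

trait ToyListen {
    fn block_disconnected(&mut self, hash: BlockHash, height: u32);
    fn block_connected(&mut self, hash: BlockHash, height: u32);
}

// Walk the "reorg path": disconnect stale blocks back to the fork point, then
// connect the new chain forward until the listener is at the current best tip.
fn replay_reorg<L: ToyListen>(
    listener: &mut L,
    mut stale_blocks: Vec<(BlockHash, u32)>, // stale tip last, fork point excluded
    new_blocks: &[(BlockHash, u32)],         // ascending, starting just above the fork
) {
    while let Some((hash, height)) = stale_blocks.pop() {
        listener.block_disconnected(hash, height);
    }
    for &(hash, height) in new_blocks {
        listener.block_connected(hash, height);
    }
}
```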
You MUST rescan any blocks along -/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call -/// block_connected() to step towards your best block) upon deserialization before using the -/// object! +/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which +/// tells you the last block hash which was connected. You should get the best block tip before using the manager. +/// See [`chain::Listen`] and [`chain::Confirm`] for more details. /// -/// Note that ChannelManager is responsible for tracking liveness of its channels and generating -/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid +/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating +/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been /// offline for a full minute. In order to track this, you must call -/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect. +/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect. /// -/// To avoid trivial DoS issues, ChannelManager limits the number of inbound connections and +/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and /// inbound channels without confirmed funding transactions. This may result in nodes which we do /// not have a channel with being unable to connect to us or open new channels with us if we have /// many peers with unfunded channels. @@ -642,11 +638,20 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C /// exempted from the count of unfunded channels. Similarly, outbound channels and connections are /// never limited. Please ensure you limit the count of such channels yourself. /// -/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager -/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but -/// essentially you should default to using a SimpleRefChannelManager, and use a -/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when +/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`] +/// a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but +/// essentially you should default to using a [`SimpleRefChannelManager`], and use a +/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when /// you're using lightning-net-tokio. 
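The paragraph above boils down to a lifetime question: `tokio::spawn` (like `std::thread::spawn`) requires `'static` arguments, which owned `Arc`s satisfy but plain references do not. A dependency-free sketch of that distinction using `std::thread` and a placeholder type (`MyManager` is not an LDK type):

```rust
use std::sync::Arc;
use std::thread;

// Placeholder for a long-lived handle such as a ChannelManager; not an LDK type.
struct MyManager { name: &'static str }

impl MyManager {
    fn timer_tick_occurred(&self) { println!("tick from {}", self.name); }
}

fn main() {
    // An owned Arc is 'static, so a clone can move into a spawned task -- the same
    // reason SimpleArcChannelManager wraps its type parameters in Arc.
    let manager = Arc::new(MyManager { name: "node-a" });
    let for_task = Arc::clone(&manager);
    let handle = thread::spawn(move || for_task.timer_tick_occurred());
    handle.join().unwrap();

    // A plain reference (&MyManager) would not satisfy the 'static bound here,
    // which is when the SimpleRef* alias stops being an option.
    manager.timer_tick_occurred();
}
```

With references instead of `Arc`s (the `SimpleRefChannelManager` shape), the same spawn would not compile, which is why the reference alias is only recommended when no `'static` bound is involved.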
+/// +/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected +/// [`funding_created`]: msgs::FundingCreated +/// [`funding_transaction_generated`]: Self::funding_transaction_generated +/// [`BlockHash`]: bitcoin::hash_types::BlockHash +/// [`update_channel`]: chain::Watch::update_channel +/// [`ChannelUpdate`]: msgs::ChannelUpdate +/// [`timer_tick_occurred`]: Self::timer_tick_occurred +/// [`read`]: ReadableArgs::read // // Lock order: // The tree structure below illustrates the lock order requirements for the different locks of the @@ -1047,7 +1052,7 @@ pub struct ChannelCounterparty { pub outbound_htlc_maximum_msat: Option, } -/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels +/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`] #[derive(Clone, Debug, PartialEq)] pub struct ChannelDetails { /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes, @@ -1118,6 +1123,11 @@ pub struct ChannelDetails { /// inbound. This may be zero for inbound channels serialized with LDK versions prior to /// 0.0.113. pub user_channel_id: u128, + /// The currently negotiated fee rate denominated in satoshi per 1000 weight units, + /// which is applied to commitment and HTLC transactions. + /// + /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115. + pub feerate_sat_per_1000_weight: Option, /// Our total balance. This is the amount we would get if we close the channel. /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this /// amount is not likely to be recoverable on close. @@ -1229,6 +1239,56 @@ impl ChannelDetails { pub fn get_outbound_payment_scid(&self) -> Option { self.short_channel_id.or(self.outbound_scid_alias) } + + fn from_channel(channel: &Channel, + best_block_height: u32, latest_features: InitFeatures) -> Self { + + let balance = channel.get_available_balances(); + let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = + channel.get_holder_counterparty_selected_channel_reserve_satoshis(); + ChannelDetails { + channel_id: channel.channel_id(), + counterparty: ChannelCounterparty { + node_id: channel.get_counterparty_node_id(), + features: latest_features, + unspendable_punishment_reserve: to_remote_reserve_satoshis, + forwarding_info: channel.counterparty_forwarding_info(), + // Ensures that we have actually received the `htlc_minimum_msat` value + // from the counterparty through the `OpenChannel` or `AcceptChannel` + // message (as they are always the first message from the counterparty). + // Else `Channel::get_counterparty_htlc_minimum_msat` could return the + // default `0` value set by `Channel::new_outbound`. + outbound_htlc_minimum_msat: if channel.have_received_message() { + Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, + outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), + }, + funding_txo: channel.get_funding_txo(), + // Note that accept_channel (or open_channel) is always the first message, so + // `have_received_message` indicates that type negotiation has completed. 
+ channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, + short_channel_id: channel.get_short_channel_id(), + outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, + inbound_scid_alias: channel.latest_inbound_scid_alias(), + channel_value_satoshis: channel.get_value_satoshis(), + feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), + unspendable_punishment_reserve: to_self_reserve_satoshis, + balance_msat: balance.balance_msat, + inbound_capacity_msat: balance.inbound_capacity_msat, + outbound_capacity_msat: balance.outbound_capacity_msat, + next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, + user_channel_id: channel.get_user_id(), + confirmations_required: channel.minimum_depth(), + confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), + force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), + is_outbound: channel.is_outbound(), + is_channel_ready: channel.is_usable(), + is_usable: channel.is_live(), + is_public: channel.should_announce(), + inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), + config: Some(channel.config()), + } + } } /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments. @@ -1543,16 +1603,21 @@ where R::Target: Router, L::Target: Logger, { - /// Constructs a new ChannelManager to hold several channels and route between them. + /// Constructs a new `ChannelManager` to hold several channels and route between them. /// /// This is the main "logic hub" for all channel-related actions, and implements - /// ChannelMessageHandler. + /// [`ChannelMessageHandler`]. /// /// Non-proportional fees are fixed according to our risk using the provided fee estimator. /// - /// Users need to notify the new ChannelManager when a new block is connected or - /// disconnected using its `block_connected` and `block_disconnected` methods, starting - /// from after `params.latest_hash`. + /// Users need to notify the new `ChannelManager` when a new block is connected or + /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting + /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for + /// more details. 
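`feerate_sat_per_1000_weight`, the new `ChannelDetails` field populated just above, is expressed in satoshis per 1000 weight units. As a rough, non-authoritative illustration of what such a rate means in absolute terms, BOLT-3-style fee computation multiplies the rate by the transaction weight and floors the division by 1000:

```rust
/// Fee in satoshis for a transaction of `weight` weight units at
/// `feerate_sat_per_1000_weight` (integer division rounds down, as in BOLT 3).
fn fee_for_weight(feerate_sat_per_1000_weight: u32, weight: u64) -> u64 {
    feerate_sat_per_1000_weight as u64 * weight / 1000
}

fn main() {
    // E.g. a ~724-weight commitment transaction at 2500 sat per 1000 weight
    // costs 1810 sats.
    assert_eq!(fee_for_weight(2500, 724), 1810);
}
```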
+ /// + /// [`block_connected`]: chain::Listen::block_connected + /// [`block_disconnected`]: chain::Listen::block_disconnected + /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self { let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); @@ -1716,58 +1781,17 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { - let balance = channel.get_available_balances(); - let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - channel.get_holder_counterparty_selected_channel_reserve_satoshis(); - res.push(ChannelDetails { - channel_id: (*channel_id).clone(), - counterparty: ChannelCounterparty { - node_id: channel.get_counterparty_node_id(), - features: peer_state.latest_features.clone(), - unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: channel.counterparty_forwarding_info(), - // Ensures that we have actually received the `htlc_minimum_msat` value - // from the counterparty through the `OpenChannel` or `AcceptChannel` - // message (as they are always the first message from the counterparty). - // Else `Channel::get_counterparty_htlc_minimum_msat` could return the - // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.have_received_message() { - Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), - }, - funding_txo: channel.get_funding_txo(), - // Note that accept_channel (or open_channel) is always the first message, so - // `have_received_message` indicates that type negotiation has completed. - channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, - short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.latest_inbound_scid_alias(), - channel_value_satoshis: channel.get_value_satoshis(), - unspendable_punishment_reserve: to_self_reserve_satoshis, - balance_msat: balance.balance_msat, - inbound_capacity_msat: balance.inbound_capacity_msat, - outbound_capacity_msat: balance.outbound_capacity_msat, - next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - user_channel_id: channel.get_user_id(), - confirmations_required: channel.minimum_depth(), - confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), - inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), - config: Some(channel.config()), - }); + for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { + let details = ChannelDetails::from_channel(channel, best_block_height, + peer_state.latest_features.clone()); + res.push(details); } } } res } - /// Gets the list of open channels, in random order. 
See ChannelDetail field documentation for + /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for /// more information. pub fn list_channels(&self) -> Vec { self.list_channels_with_filter(|_| true) @@ -1786,6 +1810,24 @@ where self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Gets the list of channels we have with a given counterparty, in random order. + pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec { + let best_block_height = self.best_block.read().unwrap().height(); + let per_peer_state = self.per_peer_state.read().unwrap(); + + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let features = &peer_state.latest_features; + return peer_state.channel_by_id + .iter() + .map(|(_, channel)| + ChannelDetails::from_channel(channel, best_block_height, features.clone())) + .collect(); + } + vec![] + } + /// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a /// successful path, or have unresolved HTLCs. /// @@ -1902,11 +1944,12 @@ where /// would appear on a force-closure transaction, whichever is lower. We will allow our /// counterparty to pay as much fee as they'd like, however. /// - /// May generate a SendShutdown message event on success, which should be relayed. + /// May generate a [`SendShutdown`] message event on success, which should be relayed. /// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, None) } @@ -1925,11 +1968,12 @@ where /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which /// will appear on a force-closure transaction, whichever is lower). /// - /// May generate a SendShutdown message event on success, which should be relayed. + /// May generate a [`SendShutdown`] message event on success, which should be relayed. 
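The new `list_channels_with_counterparty` added earlier in this hunk avoids scanning every channel by looking up the single `per_peer_state` entry for the requested peer. A simplified standalone sketch of that shape, with illustrative types rather than LDK's:

```rust
use std::collections::HashMap;

// Illustrative stand-ins, not LDK's types.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct PublicKey([u8; 33]);
#[derive(Clone)]
struct ChannelDetailsLite { channel_id: [u8; 32] }

struct PeerState { channels: Vec<ChannelDetailsLite> }

// Mirrors list_channels_with_counterparty: one map lookup, then clone out that
// peer's channel details, returning an empty Vec for unknown peers.
fn channels_with_counterparty(
    per_peer_state: &HashMap<PublicKey, PeerState>,
    counterparty: &PublicKey,
) -> Vec<ChannelDetailsLite> {
    per_peer_state
        .get(counterparty)
        .map(|peer| peer.channels.clone())
        .unwrap_or_default()
}
```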
/// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight)) } @@ -1965,7 +2009,7 @@ where let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }); } else { self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); } @@ -2374,13 +2418,16 @@ where pending_forward_info } - /// Gets the current channel_update for the given channel. This first checks if the channel is + /// Gets the current [`channel_update`] for the given channel. This first checks if the channel is /// public, and thus should be called whenever the result is going to be passed out in a /// [`MessageSendEvent::BroadcastChannelUpdate`] event. /// - /// Note that in `internal_closing_signed`, this function is called without the `peer_state` + /// Note that in [`internal_closing_signed`], this function is called without the `peer_state` /// corresponding to the channel's counterparty locked, as the channel been removed from the /// storage and the `peer_state` lock has been dropped. + /// + /// [`channel_update`]: msgs::ChannelUpdate + /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { if !chan.should_announce() { return Err(LightningError { @@ -2395,14 +2442,17 @@ where self.get_channel_update_for_unicast(chan) } - /// Gets the current channel_update for the given channel. This does not check if the channel - /// is public (only returning an Err if the channel does not yet have an assigned short_id), + /// Gets the current [`channel_update`] for the given channel. This does not check if the channel + /// is public (only returning an `Err` if the channel does not yet have an assigned SCID), /// and thus MUST NOT be called unless the recipient of the resulting message has already /// provided evidence that they know about the existence of the channel. /// - /// Note that through `internal_closing_signed`, this function is called without the + /// Note that through [`internal_closing_signed`], this function is called without the /// `peer_state` corresponding to the channel's counterparty locked, as the channel been /// removed from the storage and the `peer_state` lock has been dropped. 
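Earlier in this hunk, the counterparty-supplied `peer_msg` in `ClosureReason::CounterpartyForceClosed` is now wrapped in `UntrustedString` to flag it as attacker-controlled data. The sketch below shows one plausible way such a marker type could sanitize output when displayed; it is illustrative only and not copied from LDK's `util::string::UntrustedString`:

```rust
use std::fmt;

// Illustrative wrapper marking a string as counterparty-provided; the sanitizing
// Display below is an assumption for this sketch, not LDK's behaviour.
struct UntrustedPeerMessage(String);

impl fmt::Display for UntrustedPeerMessage {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Replace control characters so a hostile peer message can't mangle log output.
        for c in self.0.chars() {
            let c = if c.is_control() { '\u{FFFD}' } else { c };
            write!(f, "{}", c)?;
        }
        Ok(())
    }
}

fn main() {
    let msg = UntrustedPeerMessage("channel closed\u{1b}[2J".to_string());
    println!("peer said: {}", msg); // the escape sequence is neutralized
}
```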
+ /// + /// [`channel_update`]: msgs::ChannelUpdate + /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) { @@ -2441,12 +2491,12 @@ where } #[cfg(test)] - pub(crate) fn test_send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + pub(crate) fn test_send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { let _lck = self.total_consistency_lock.read().unwrap(); - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes) + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes) } - fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { // The top-level caller should hold the total_consistency_lock read lock. debug_assert!(self.total_consistency_lock.try_write().is_err()); @@ -2485,7 +2535,6 @@ where first_hop_htlc_msat: htlc_msat, payment_id, payment_secret: payment_secret.clone(), - payment_params: payment_params.clone(), }, onion_packet, &self.logger); match break_chan_entry!(self, send_res, chan) { Some(monitor_update) => { @@ -2529,7 +2578,7 @@ where /// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`] /// fields for more info. /// - /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via + /// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via /// [`PeerManager::process_events`]). /// /// # Avoiding Duplicate Payments @@ -2553,7 +2602,7 @@ where /// /// # Possible Error States on [`PaymentSendFailure`] /// - /// Each path may have a different return value, and PaymentSendValue may return a Vec with + /// Each path may have a different return value, and [`PaymentSendFailure`] may return a `Vec` with /// each entry matching the corresponding-index entry in the route paths, see /// [`PaymentSendFailure`] for more info. /// @@ -2566,7 +2615,7 @@ where /// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the /// relevant updates. /// - /// Note that depending on the type of the PaymentSendFailure the HTLC may have been + /// Note that depending on the type of the [`PaymentSendFailure`] the HTLC may have been /// irrevocably committed to on our end. 
In such a case, do NOT retry the payment with a /// different route unless you intend to pay twice! /// @@ -2584,6 +2633,7 @@ where /// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed + /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { @@ -2591,8 +2641,8 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on @@ -2605,8 +2655,8 @@ where &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, &self.pending_events, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } #[cfg(test)] @@ -2614,8 +2664,8 @@ where let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } #[cfg(test)] @@ -2667,8 +2717,8 @@ where self.pending_outbound_payments.send_spontaneous_payment_with_route( route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, 
cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route @@ -2685,8 +2735,8 @@ where retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, &self.pending_events, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Send a payment that is probing the given route for liquidity. We calculate the @@ -2696,8 +2746,8 @@ where let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a @@ -3421,8 +3471,8 @@ where self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.pending_events, &self.logger, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) 
{ self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); @@ -3474,18 +3524,18 @@ where fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother - if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { + if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } if !chan.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", - log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.logger); NotifyOption::DoPersist @@ -3520,15 +3570,18 @@ where /// /// This currently includes: /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels, - /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more + /// * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more /// than a minute, informing the network that they should no longer attempt to route over /// the channel. - /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs - /// with the current `ChannelConfig`. + /// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs + /// with the current [`ChannelConfig`]. /// * Removing peers which have disconnected but and no longer have any channels. /// - /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate + /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate /// estimate fetches. + /// + /// [`ChannelUpdate`]: msgs::ChannelUpdate + /// [`ChannelConfig`]: crate::util::config::ChannelConfig pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; @@ -3808,9 +3861,9 @@ where // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => { + HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. 
} => { if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, - session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx, + session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx, &self.pending_events, &self.logger) { self.push_pending_forwards_ev(); } }, @@ -5142,7 +5195,7 @@ where Ok(()) } - /// Process pending events from the `chain::Watch`, returning whether any events were processed. + /// Process pending events from the [`chain::Watch`], returning whether any events were processed. fn process_pending_monitor_events(&self) -> bool { debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock @@ -6469,8 +6522,8 @@ pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelType /// [`ChannelManager`]. pub fn provided_init_features(_config: &UserConfig) -> InitFeatures { // Note that if new features are added here which other peers may (eventually) require, we - // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for - // ErroringMessageHandler. + // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for + // [`ErroringMessageHandler`]. let mut features = InitFeatures::empty(); features.set_data_loss_protect_optional(); features.set_upfront_shutdown_script_optional(); @@ -6544,6 +6597,7 @@ impl Writeable for ChannelDetails { (33, self.inbound_htlc_minimum_msat, option), (35, self.inbound_htlc_maximum_msat, option), (37, user_channel_id_high_opt, option), + (39, self.feerate_sat_per_1000_weight, option), }); Ok(()) } @@ -6579,6 +6633,7 @@ impl Readable for ChannelDetails { (33, inbound_htlc_minimum_msat, option), (35, inbound_htlc_maximum_msat, option), (37, user_channel_id_high_opt, option), + (39, feerate_sat_per_1000_weight, option), }); // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with @@ -6612,6 +6667,7 @@ impl Readable for ChannelDetails { is_public: is_public.0.unwrap(), inbound_htlc_minimum_msat, inbound_htlc_maximum_msat, + feerate_sat_per_1000_weight, }) } } @@ -6832,7 +6888,6 @@ impl Readable for HTLCSource { path, payment_id: payment_id.unwrap(), payment_secret, - payment_params, }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -6844,7 +6899,7 @@ impl Readable for HTLCSource { impl Writeable for HTLCSource { fn write(&self, writer: &mut W) -> Result<(), crate::io::Error> { match self { - HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => { + HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret } => { 0u8.write(writer)?; let payment_id_opt = Some(payment_id); write_tlv_fields!(writer, { @@ -6853,7 +6908,7 @@ impl Writeable for HTLCSource { (2, first_hop_htlc_msat, required), (3, payment_secret, option), (4, *path, vec_type), - (5, payment_params, option), + (5, None::, option), // payment_params in LDK versions prior to 0.0.115 }); } HTLCSource::PreviousHopData(ref field) => { @@ -7183,7 +7238,7 @@ where /// In such cases the latest local transactions will be sent to the tx_broadcaster included in /// this struct. 
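The context noted earlier in this hunk explains that `user_channel_id` grew from u64 to u128 and stays backwards compatible by serializing it in two halves, with the high 64 bits written as an optional field (`user_channel_id_high_opt`, type 37 above). A minimal sketch of the split/recombine idea; the optional-field handling here is illustrative, not LDK's byte-for-byte behaviour:

```rust
// Split a u128 user_channel_id so old readers that only understand the original
// u64 field still see the low bits; the high half rides in an optional field.
fn split_user_channel_id(id: u128) -> (u64, Option<u64>) {
    let low = id as u64;
    let high = (id >> 64) as u64;
    (low, if high == 0 { None } else { Some(high) })
}

fn combine_user_channel_id(low: u64, high: Option<u64>) -> u128 {
    ((high.unwrap_or(0) as u128) << 64) | low as u128
}

fn main() {
    let id: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
    let (low, high) = split_user_channel_id(id);
    assert_eq!(combine_user_channel_id(low, high), id);
}
```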
/// - /// (C-not exported) because we have no HashMap bindings + /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap::Signer>>, } @@ -7854,8 +7909,10 @@ mod tests { // to connect messages with new values chan.0.contents.fee_base_msat *= 2; chan.1.contents.fee_base_msat *= 2; - let node_a_chan_info = nodes[0].node.list_channels()[0].clone(); - let node_b_chan_info = nodes[1].node.list_channels()[0].clone(); + let node_a_chan_info = nodes[0].node.list_channels_with_counterparty( + &nodes[1].node.get_our_node_id()).pop().unwrap(); + let node_b_chan_info = nodes[1].node.list_channels_with_counterparty( + &nodes[0].node.get_our_node_id()).pop().unwrap(); // The first two nodes (which opened a channel) should now require fresh persistence assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); @@ -7931,7 +7988,7 @@ mod tests { // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap(); - nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); + nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7961,7 +8018,7 @@ mod tests { expect_payment_failed!(nodes[0], our_payment_hash, true); // Send the second half of the original MPP payment. 
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); + nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -8167,7 +8224,7 @@ mod tests { assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1); } #[test] @@ -8210,7 +8267,7 @@ mod tests { assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1); } #[test] @@ -8238,7 +8295,8 @@ mod tests { match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() { PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { - assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) }, + assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) + }, _ => panic!("unexpected error") } } @@ -8298,7 +8356,7 @@ mod tests { match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) { Ok(_) => panic!("Unexpected ok"), Err(()) => { - nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1); } } @@ -8515,7 +8573,7 @@ mod tests { // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); - assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id, + assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, open_channel_msg.temporary_channel_id); // Further, because all of our channels with nodes[0] are inbound, and none of them funded, @@ -8562,7 +8620,7 @@ mod tests { open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); } nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg); - assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id, + assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id, open_channel_msg.temporary_channel_id); // Of course, however, outbound channels are always allowed @@ -8604,7 +8662,7 @@ mod tests { // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be // 
rejected. nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); - assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id, + assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, open_channel_msg.temporary_channel_id); // but we can still open an outbound channel. @@ -8613,7 +8671,7 @@ mod tests { // but even with such an outbound channel, additional inbound channels will still fail. nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); - assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id, + assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, open_channel_msg.temporary_channel_id); } @@ -8669,7 +8727,7 @@ mod tests { } _ => panic!("Unexpected event"), } - assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id, + assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id, open_channel_msg.temporary_channel_id); // ...however if we accept the same channel 0conf it should work just fine. @@ -8711,7 +8769,7 @@ mod tests { _ => panic!("Unexpected event"), } - let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()); + let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); @@ -8877,7 +8935,7 @@ pub mod bench { let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap()); $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]); $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg); - let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id()); + let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id()); $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa); $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs); $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id())); @@ -8896,7 +8954,7 @@ pub mod bench { _ => panic!("Failed to generate claim event"), } - let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id()); + let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id()); $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa); $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs); $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
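The tests above exercise the per-peer cap on unfunded inbound channels described in the `ChannelManager` docs (a `MAX_UNFUNDED_CHANS_PER_PEER + 1` channel is summarily rejected). A standalone sketch of that kind of bookkeeping; the limit value and tracking structure here are placeholders, not LDK's implementation:

```rust
use std::collections::HashMap;

// Placeholder limit for this sketch; LDK defines its own constant.
const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;

#[derive(Default)]
struct PeerChannelCounts { unfunded_inbound: usize }

#[derive(Default)]
struct DosLimiter { peers: HashMap<[u8; 33], PeerChannelCounts> }

impl DosLimiter {
    // Returns Err (mirroring an open_channel rejection) once a peer already has the
    // maximum number of inbound channels without a confirmed funding transaction.
    fn accept_inbound_channel(&mut self, peer: [u8; 33]) -> Result<(), &'static str> {
        let counts = self.peers.entry(peer).or_default();
        if counts.unfunded_inbound >= MAX_UNFUNDED_CHANS_PER_PEER {
            return Err("too many unfunded inbound channels from this peer");
        }
        counts.unfunded_inbound += 1;
        Ok(())
    }

    // Called once the channel's funding transaction confirms.
    fn channel_funded(&mut self, peer: [u8; 33]) {
        if let Some(counts) = self.peers.get_mut(&peer) {
            counts.unfunded_inbound = counts.unfunded_inbound.saturating_sub(1);
        }
    }
}
```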