X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=82f48c3a73b59d05a208130d83d8c1e9863fecdf;hb=2223e92ac6b1ef0b11ac7448ed35f5d0adf77aaa;hp=675122cfb4df890b4c89ebff1b60aab95e7cf12f;hpb=4e002dcf5c97f506cf8e79e9fab02d71808cae5f;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 675122cf..82f48c3a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9,15 +9,13 @@ //! The top-level channel management and payment tracking stuff lives here. //! -//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is +//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is //! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those //! upon reconnect to the relevant peer(s). //! -//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing +//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing //! on-chain transactions (it only monitors the chain to watch for any force-closes that might //! imply it needs to fail HTLCs/payments/channels it manages). -//! -//! [`find_route`]: crate::routing::router::find_route use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::Transaction; @@ -55,12 +53,13 @@ use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VA use crate::ln::outbound_payment; use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment}; use crate::ln::wire::Encode; -use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner}; +use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner}; use crate::util::config::{UserConfig, ChannelConfig}; use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::util::events; use crate::util::wakers::{Future, Notifier}; use crate::util::scid_utils::fake_scid; +use crate::util::string::UntrustedString; use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; use crate::util::logger::{Level, Logger}; use crate::util::errors::APIError; @@ -78,7 +77,7 @@ use core::time::Duration; use core::ops::Deref; // Re-export this for use in the public API. -pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry}; +pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure}; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -200,7 +199,8 @@ struct ClaimableHTLC { } /// A payment identifier used to uniquely identify a payment to LDK. -/// (C-not exported) as we just use [u8; 32] directly +/// +/// This is not exported to bindings users as we just use [u8; 32] directly #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct PaymentId(pub [u8; 32]); @@ -218,7 +218,8 @@ impl Readable for PaymentId { } /// An identifier used to uniquely identify an intercepted HTLC to LDK. 
-/// (C-not exported) as we just use [u8; 32] directly +/// +/// This is not exported to bindings users as we just use [u8; 32] directly #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct InterceptId(pub [u8; 32]); @@ -234,6 +235,36 @@ impl Readable for InterceptId { Ok(InterceptId(buf)) } } + +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`]. +pub(crate) enum SentHTLCId { + PreviousHopData { short_channel_id: u64, htlc_id: u64 }, + OutboundRoute { session_priv: SecretKey }, +} +impl SentHTLCId { + pub(crate) fn from_source(source: &HTLCSource) -> Self { + match source { + HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData { + short_channel_id: hop_data.short_channel_id, + htlc_id: hop_data.htlc_id, + }, + HTLCSource::OutboundRoute { session_priv, .. } => + Self::OutboundRoute { session_priv: *session_priv }, + } + } +} +impl_writeable_tlv_based_enum!(SentHTLCId, + (0, PreviousHopData) => { + (0, short_channel_id, required), + (2, htlc_id, required), + }, + (2, OutboundRoute) => { + (0, session_priv, required), + }; +); + + /// Tracks the inbound corresponding to an outbound HTLC #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash #[derive(Clone, PartialEq, Eq)] @@ -247,11 +278,6 @@ pub(crate) enum HTLCSource { first_hop_htlc_msat: u64, payment_id: PaymentId, payment_secret: Option, - /// Note that this is now "deprecated" - we write it for forwards (and read it for - /// backwards) compatibility reasons, but prefer to use the data in the - /// [`super::outbound_payment`] module, which stores per-payment data once instead of in - /// each HTLC. - payment_params: Option, }, } #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash @@ -262,14 +288,13 @@ impl core::hash::Hash for HTLCSource { 0u8.hash(hasher); prev_hop_data.hash(hasher); }, - HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => { + HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat } => { 1u8.hash(hasher); path.hash(hasher); session_priv[..].hash(hasher); payment_id.hash(hasher); payment_secret.hash(hasher); first_hop_htlc_msat.hash(hasher); - payment_params.hash(hasher); }, } } @@ -284,7 +309,6 @@ impl HTLCSource { first_hop_htlc_msat: 0, payment_id: PaymentId([2; 32]), payment_secret: None, - payment_params: None, } } } @@ -468,7 +492,7 @@ pub(crate) enum MonitorUpdateCompletionAction { impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (0, PaymentClaimed) => { (0, payment_hash, required) }, - (2, EmitEvent) => { (0, event, ignorable) }, + (2, EmitEvent) => { (0, event, upgradable_required) }, ); /// State we hold per-peer. @@ -538,15 +562,16 @@ struct PendingInboundPayment { min_value_msat: Option, } -/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g. -/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static +/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g. +/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case -/// SimpleRefChannelManager is the more appropriate type. 
Defining these type aliases prevents -/// issues such as overly long function definitions. Note that the ChannelManager can take any type -/// that implements KeysInterface or Router for its keys manager and router, respectively, but this -/// type alias chooses the concrete types of KeysManager and DefaultRouter. +/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents +/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type +/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager, +/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types +/// of [`KeysManager`] and [`DefaultRouter`]. /// -/// (C-not exported) as Arcs don't make sense in bindings +/// This is not exported to bindings users as Arcs don't make sense in bindings pub type SimpleArcChannelManager = ChannelManager< Arc, Arc, @@ -562,54 +587,71 @@ pub type SimpleArcChannelManager = ChannelManager< Arc >; -/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference -/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't +/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference +/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't /// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as -/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes). +/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes). /// But if this is not necessary, using a reference is more efficient. Defining these type aliases /// issues such as overly long function definitions. Note that the ChannelManager can take any type -/// that implements KeysInterface or Router for its keys manager and router, respectively, but this -/// type alias chooses the concrete types of KeysManager and DefaultRouter. +/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager, +/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types +/// of [`KeysManager`] and [`DefaultRouter`]. /// -/// (C-not exported) as Arcs don't make sense in bindings +/// This is not exported to bindings users as Arcs don't make sense in bindings pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex, &'g L>>>, &'g L>; /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. /// -/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through +/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through /// to individual Channels. /// -/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for +/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for /// all peers during write/read (though does not modify this instance, only the instance being -/// serialized). 
This will result in any channels which have not yet exchanged funding_created (ie -/// called funding_transaction_generated for outbound channels). +/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e., +/// called [`funding_transaction_generated`] for outbound channels) being closed. /// -/// Note that you can be a bit lazier about writing out ChannelManager than you can be with -/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before -/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates -/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during +/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with +/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before +/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates +/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during /// the serialization process). If the deserialized version is out-of-date compared to the -/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the -/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees). +/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the +/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees). /// -/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which -/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along -/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call -/// block_connected() to step towards your best block) upon deserialization before using the -/// object! +/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which +/// tells you the last block hash which was connected. You should get the best block tip before using the manager. +/// See [`chain::Listen`] and [`chain::Confirm`] for more details. /// -/// Note that ChannelManager is responsible for tracking liveness of its channels and generating -/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid +/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating +/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been /// offline for a full minute. In order to track this, you must call -/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect. +/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect. /// -/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager -/// a SimpleRefChannelManager, for conciseness. 
See their documentation for more details, but -/// essentially you should default to using a SimpleRefChannelManager, and use a -/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when +/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and +/// inbound channels without confirmed funding transactions. This may result in nodes which we do +/// not have a channel with being unable to connect to us or open new channels with us if we have +/// many peers with unfunded channels. +/// +/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are +/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are +/// never limited. Please ensure you limit the count of such channels yourself. +/// +/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`] +/// a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but +/// essentially you should default to using a [`SimpleRefChannelManager`], and use a +/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when /// you're using lightning-net-tokio. +/// +/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected +/// [`funding_created`]: msgs::FundingCreated +/// [`funding_transaction_generated`]: Self::funding_transaction_generated +/// [`BlockHash`]: bitcoin::hash_types::BlockHash +/// [`update_channel`]: chain::Watch::update_channel +/// [`ChannelUpdate`]: msgs::ChannelUpdate +/// [`timer_tick_occurred`]: Self::timer_tick_occurred +/// [`read`]: ReadableArgs::read // // Lock order: // The tree structure below illustrates the lock order requirements for the different locks of the @@ -955,6 +997,19 @@ pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3; /// [`OutboundPayments::remove_stale_resolved_payments`]. pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7; +/// The maximum number of unfunded channels we can have per-peer before we start rejecting new +/// (inbound) ones. The number of peers with unfunded channels is limited separately in +/// [`MAX_UNFUNDED_CHANNEL_PEERS`]. +const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4; + +/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach +/// this many peers we reject new (inbound) channels from peers with which we don't have a channel. +const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50; + +/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this +/// many peers we reject new (inbound) connections. +const MAX_NO_CHANNEL_PEERS: usize = 250; + /// Information needed for constructing an invoice route hint for this channel. #[derive(Clone, Debug, PartialEq)] pub struct CounterpartyForwardingInfo { @@ -997,7 +1052,7 @@ pub struct ChannelCounterparty { pub outbound_htlc_maximum_msat: Option, } -/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels +/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`] #[derive(Clone, Debug, PartialEq)] pub struct ChannelDetails { /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes, @@ -1068,6 +1123,11 @@ pub struct ChannelDetails { /// inbound. This may be zero for inbound channels serialized with LDK versions prior to /// 0.0.113. 
pub user_channel_id: u128, + /// The currently negotiated fee rate denominated in satoshi per 1000 weight units, + /// which is applied to commitment and HTLC transactions. + /// + /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115. + pub feerate_sat_per_1000_weight: Option, /// Our total balance. This is the amount we would get if we close the channel. /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this /// amount is not likely to be recoverable on close. @@ -1179,6 +1239,56 @@ impl ChannelDetails { pub fn get_outbound_payment_scid(&self) -> Option { self.short_channel_id.or(self.outbound_scid_alias) } + + fn from_channel(channel: &Channel, + best_block_height: u32, latest_features: InitFeatures) -> Self { + + let balance = channel.get_available_balances(); + let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = + channel.get_holder_counterparty_selected_channel_reserve_satoshis(); + ChannelDetails { + channel_id: channel.channel_id(), + counterparty: ChannelCounterparty { + node_id: channel.get_counterparty_node_id(), + features: latest_features, + unspendable_punishment_reserve: to_remote_reserve_satoshis, + forwarding_info: channel.counterparty_forwarding_info(), + // Ensures that we have actually received the `htlc_minimum_msat` value + // from the counterparty through the `OpenChannel` or `AcceptChannel` + // message (as they are always the first message from the counterparty). + // Else `Channel::get_counterparty_htlc_minimum_msat` could return the + // default `0` value set by `Channel::new_outbound`. + outbound_htlc_minimum_msat: if channel.have_received_message() { + Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, + outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), + }, + funding_txo: channel.get_funding_txo(), + // Note that accept_channel (or open_channel) is always the first message, so + // `have_received_message` indicates that type negotiation has completed. + channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, + short_channel_id: channel.get_short_channel_id(), + outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, + inbound_scid_alias: channel.latest_inbound_scid_alias(), + channel_value_satoshis: channel.get_value_satoshis(), + feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), + unspendable_punishment_reserve: to_self_reserve_satoshis, + balance_msat: balance.balance_msat, + inbound_capacity_msat: balance.inbound_capacity_msat, + outbound_capacity_msat: balance.outbound_capacity_msat, + next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, + user_channel_id: channel.get_user_id(), + confirmations_required: channel.minimum_depth(), + confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), + force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), + is_outbound: channel.is_outbound(), + is_channel_ready: channel.is_usable(), + is_usable: channel.is_live(), + is_public: channel.should_announce(), + inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), + config: Some(channel.config()), + } + } } /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments. @@ -1357,69 +1467,6 @@ macro_rules! remove_channel { } } -macro_rules! 
handle_monitor_update_res { - ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { - match $err { - ChannelMonitorUpdateStatus::PermanentFailure => { - log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..])); - update_maps_on_chan_removal!($self, $chan); - let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(), - $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() )); - (res, true) - }, - ChannelMonitorUpdateStatus::InProgress => { - log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations", - log_bytes!($chan_id[..]), - if $resend_commitment && $resend_raa { - match $action_type { - RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" }, - RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" }, - } - } else if $resend_commitment { "commitment" } - else if $resend_raa { "RAA" } - else { "nothing" }, - (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(), - (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(), - (&$failed_finalized_fulfills as &Vec).len()); - if !$resend_commitment { - debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa); - } - if !$resend_raa { - debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment); - } - $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills); - (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false) - }, - ChannelMonitorUpdateStatus::Completed => { - (Ok(()), false) - }, - } - }; - ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { - let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); - if drop { - $entry.remove_entry(); - } - res - } }; - ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { { - debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst); - handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) - } }; - ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => { - handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) - }; - ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => { - handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new()) - }; - ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => 
{ - handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new()) - }; - ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { - handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new()) - }; -} - macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { @@ -1458,7 +1505,7 @@ macro_rules! emit_channel_ready_event { } macro_rules! handle_monitor_update_completion { - ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { { + ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { let mut updates = $chan.monitor_updating_restored(&$self.logger, &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); @@ -1491,6 +1538,7 @@ macro_rules! handle_monitor_update_completion { let channel_id = $chan.channel_id(); core::mem::drop($peer_state_lock); + core::mem::drop($per_peer_state_lock); $self.handle_monitor_update_completion_actions(update_actions); @@ -1506,7 +1554,7 @@ macro_rules! handle_monitor_update_completion { } macro_rules! handle_new_monitor_update { - ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { { + ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { { // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in // any case so that it won't deadlock. debug_assert!($self.id_to_peer.try_lock().is_ok()); @@ -1533,14 +1581,14 @@ macro_rules! handle_new_monitor_update { .update_id == $update_id) && $chan.get_latest_monitor_update_id() == $update_id { - handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan); + handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan); } Ok(()) }, } } }; - ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => { - handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry()) + ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => { + handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry()) } } @@ -1555,16 +1603,21 @@ where R::Target: Router, L::Target: Logger, { - /// Constructs a new ChannelManager to hold several channels and route between them. + /// Constructs a new `ChannelManager` to hold several channels and route between them. /// /// This is the main "logic hub" for all channel-related actions, and implements - /// ChannelMessageHandler. + /// [`ChannelMessageHandler`]. /// /// Non-proportional fees are fixed according to our risk using the provided fee estimator. 
/// - /// Users need to notify the new ChannelManager when a new block is connected or - /// disconnected using its `block_connected` and `block_disconnected` methods, starting - /// from after `params.latest_hash`. + /// Users need to notify the new `ChannelManager` when a new block is connected or + /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting + /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for + /// more details. + /// + /// [`block_connected`]: chain::Listen::block_connected + /// [`block_disconnected`]: chain::Listen::block_disconnected + /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self { let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); @@ -1728,71 +1781,28 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { - let balance = channel.get_available_balances(); - let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - channel.get_holder_counterparty_selected_channel_reserve_satoshis(); - res.push(ChannelDetails { - channel_id: (*channel_id).clone(), - counterparty: ChannelCounterparty { - node_id: channel.get_counterparty_node_id(), - features: peer_state.latest_features.clone(), - unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: channel.counterparty_forwarding_info(), - // Ensures that we have actually received the `htlc_minimum_msat` value - // from the counterparty through the `OpenChannel` or `AcceptChannel` - // message (as they are always the first message from the counterparty). - // Else `Channel::get_counterparty_htlc_minimum_msat` could return the - // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.have_received_message() { - Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), - }, - funding_txo: channel.get_funding_txo(), - // Note that accept_channel (or open_channel) is always the first message, so - // `have_received_message` indicates that type negotiation has completed. 
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, - short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.latest_inbound_scid_alias(), - channel_value_satoshis: channel.get_value_satoshis(), - unspendable_punishment_reserve: to_self_reserve_satoshis, - balance_msat: balance.balance_msat, - inbound_capacity_msat: balance.inbound_capacity_msat, - outbound_capacity_msat: balance.outbound_capacity_msat, - next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - user_channel_id: channel.get_user_id(), - confirmations_required: channel.minimum_depth(), - confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), - inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), - config: Some(channel.config()), - }); + for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { + let details = ChannelDetails::from_channel(channel, best_block_height, + peer_state.latest_features.clone()); + res.push(details); } } } res } - /// Gets the list of open channels, in random order. See ChannelDetail field documentation for + /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for /// more information. pub fn list_channels(&self) -> Vec { self.list_channels_with_filter(|_| true) } - /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`] - /// to ensure non-announced channels are used. + /// Gets the list of usable channels, in random order. Useful as an argument to + /// [`Router::find_route`] to ensure non-announced channels are used. /// /// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the /// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria /// are. - /// - /// [`find_route`]: crate::routing::router::find_route pub fn list_usable_channels(&self) -> Vec { // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user @@ -1800,6 +1810,24 @@ where self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Gets the list of channels we have with a given counterparty, in random order. + pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec { + let best_block_height = self.best_block.read().unwrap().height(); + let per_peer_state = self.per_peer_state.read().unwrap(); + + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let features = &peer_state.latest_features; + return peer_state.channel_by_id + .iter() + .map(|(_, channel)| + ChannelDetails::from_channel(channel, best_block_height, features.clone())) + .collect(); + } + vec![] + } + /// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a /// successful path, or have unresolved HTLCs. 
/// @@ -1876,7 +1904,7 @@ where if let Some(monitor_update) = monitor_update_opt.take() { let update_id = monitor_update.update_id; let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update); - break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry); + break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry); } if chan_entry.get().is_shutdown() { @@ -1916,11 +1944,12 @@ where /// would appear on a force-closure transaction, whichever is lower. We will allow our /// counterparty to pay as much fee as they'd like, however. /// - /// May generate a SendShutdown message event on success, which should be relayed. + /// May generate a [`SendShutdown`] message event on success, which should be relayed. /// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, None) } @@ -1939,11 +1968,12 @@ where /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which /// will appear on a force-closure transaction, whichever is lower). /// - /// May generate a SendShutdown message event on success, which should be relayed. + /// May generate a [`SendShutdown`] message event on success, which should be relayed. /// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight)) } @@ -1979,7 +2009,7 @@ where let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }); } else { self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); } @@ -2388,13 +2418,16 @@ where pending_forward_info } - /// Gets the current channel_update for the given channel. This first checks if the channel is + /// Gets the current [`channel_update`] for the given channel. This first checks if the channel is /// public, and thus should be called whenever the result is going to be passed out in a /// [`MessageSendEvent::BroadcastChannelUpdate`] event. 
/// - /// Note that in `internal_closing_signed`, this function is called without the `peer_state` + /// Note that in [`internal_closing_signed`], this function is called without the `peer_state` /// corresponding to the channel's counterparty locked, as the channel been removed from the /// storage and the `peer_state` lock has been dropped. + /// + /// [`channel_update`]: msgs::ChannelUpdate + /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { if !chan.should_announce() { return Err(LightningError { @@ -2409,14 +2442,17 @@ where self.get_channel_update_for_unicast(chan) } - /// Gets the current channel_update for the given channel. This does not check if the channel - /// is public (only returning an Err if the channel does not yet have an assigned short_id), + /// Gets the current [`channel_update`] for the given channel. This does not check if the channel + /// is public (only returning an `Err` if the channel does not yet have an assigned SCID), /// and thus MUST NOT be called unless the recipient of the resulting message has already /// provided evidence that they know about the existence of the channel. /// - /// Note that through `internal_closing_signed`, this function is called without the + /// Note that through [`internal_closing_signed`], this function is called without the /// `peer_state` corresponding to the channel's counterparty locked, as the channel been /// removed from the storage and the `peer_state` lock has been dropped. + /// + /// [`channel_update`]: msgs::ChannelUpdate + /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) { @@ -2454,22 +2490,28 @@ where }) } - // Only public for testing, this should otherwise never be called direcly - pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + #[cfg(test)] + pub(crate) fn test_send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + let _lck = self.total_consistency_lock.read().unwrap(); + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes) + } + + fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + // The top-level caller should hold the total_consistency_lock read lock. 
+ debug_assert!(self.total_consistency_lock.try_write().is_err()); + log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) - .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected"})?; + .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?; let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?; if onion_utils::route_size_insane(&onion_payloads) { - return Err(APIError::InvalidRoute{err: "Route size too large considering onion data"}); + return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()}); } let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let err: Result<(), _> = loop { let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), @@ -2478,7 +2520,7 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(&counterparty_node_id) - .ok_or_else(|| APIError::InvalidRoute{err: "No peer matching the path's first hop found!" })?; + .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { @@ -2493,13 +2535,12 @@ where first_hop_htlc_msat: htlc_msat, payment_id, payment_secret: payment_secret.clone(), - payment_params: payment_params.clone(), }, onion_packet, &self.logger); match break_chan_entry!(self, send_res, chan) { Some(monitor_update) => { let update_id = monitor_update.update_id; let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update); - if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) { + if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) { break Err(e); } if update_res == ChannelMonitorUpdateStatus::InProgress { @@ -2537,7 +2578,7 @@ where /// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`] /// fields for more info. /// - /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via + /// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via /// [`PeerManager::process_events`]). 
/// /// # Avoiding Duplicate Payments @@ -2561,7 +2602,7 @@ where /// /// # Possible Error States on [`PaymentSendFailure`] /// - /// Each path may have a different return value, and PaymentSendValue may return a Vec with + /// Each path may have a different return value, and [`PaymentSendFailure`] may return a `Vec` with /// each entry matching the corresponding-index entry in the route paths, see /// [`PaymentSendFailure`] for more info. /// @@ -2574,7 +2615,7 @@ where /// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the /// relevant updates. /// - /// Note that depending on the type of the PaymentSendFailure the HTLC may have been + /// Note that depending on the type of the [`PaymentSendFailure`] the HTLC may have been /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a /// different route unless you intend to pay twice! /// @@ -2592,34 +2633,39 @@ where /// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed + /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on /// `route_params` and retry failed payment paths based on `retry_strategy`. 
- pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> { + pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.pending_events, + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } #[cfg(test)] fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } #[cfg(test)] @@ -2667,11 +2713,12 @@ where /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, payment_id: PaymentId) -> Result { let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments.send_spontaneous_payment_with_route( route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + 
self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route @@ -2681,14 +2728,15 @@ where /// payments. /// /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend - pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result { + pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result { let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.logger, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.logger, &self.pending_events, + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Send a payment that is probing the given route for liquidity. We calculate the @@ -2696,9 +2744,10 @@ where /// us to easily discern them from real payments. 
pub fn send_probe(&self, hops: Vec) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a @@ -2739,7 +2788,7 @@ where (chan, funding_msg) }, Err(_) => { return Err(APIError::ChannelUnavailable { - err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned() + err: "Signer refused to sign the initial commitment transaction".to_owned() }) }, } }; @@ -3422,8 +3471,8 @@ where self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.pending_events, &self.logger, - |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) 
{ self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); @@ -3475,18 +3524,18 @@ where fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother - if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { + if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } if !chan.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", - log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.logger); NotifyOption::DoPersist @@ -3521,15 +3570,18 @@ where /// /// This currently includes: /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels, - /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more + /// * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more /// than a minute, informing the network that they should no longer attempt to route over /// the channel. - /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs - /// with the current `ChannelConfig`. + /// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs + /// with the current [`ChannelConfig`]. /// * Removing peers which have disconnected but and no longer have any channels. /// - /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate + /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate /// estimate fetches. + /// + /// [`ChannelUpdate`]: msgs::ChannelUpdate + /// [`ChannelConfig`]: crate::util::config::ChannelConfig pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; @@ -3680,14 +3732,14 @@ where /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on /// startup during which time claims that were in-progress at shutdown may be replayed. pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { - self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails); + self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails); } /// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the /// reason for the failure. /// /// See [`FailureCode`] for valid failure codes. 
- pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) { + pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash); @@ -3702,14 +3754,14 @@ where } /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`]. - fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason { + fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason { match failure_code { - FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16), - FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16), + FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16), + FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16), FailureCode::IncorrectOrUnknownPaymentDetails => { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); - HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data) + HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data) } } } @@ -3809,17 +3861,20 @@ where // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => { - self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx, &self.pending_events, &self.logger); + HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. 
} => { + if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, + session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx, + &self.pending_events, &self.logger) + { self.push_pending_forwards_ev(); } }, HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => { log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error); let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret); - let mut forward_event = None; + let mut push_forward_ev = false; let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); if forward_htlcs.is_empty() { - forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); + push_forward_ev = true; } match forward_htlcs.entry(*short_channel_id) { hash_map::Entry::Occupied(mut entry) => { @@ -3830,12 +3885,8 @@ where } } mem::drop(forward_htlcs); + if push_forward_ev { self.push_pending_forwards_ev(); } let mut pending_events = self.pending_events.lock().unwrap(); - if let Some(time) = forward_event { - pending_events.push(events::Event::PendingHTLCsForwardable { - time_forwardable: time - }); - } pending_events.push(events::Event::HTLCHandlingFailed { prev_channel_id: outpoint.to_channel_id(), failed_next_destination: destination, @@ -4014,13 +4065,14 @@ where None => None }; - let mut peer_state_opt = counterparty_node_id_opt.as_ref().map( + let peer_state_opt = counterparty_node_id_opt.as_ref().map( |counterparty_node_id| per_peer_state.get(counterparty_node_id).map( |peer_mutex| peer_mutex.lock().unwrap() ) ).unwrap_or(None); - if let Some(mut peer_state_lock) = peer_state_opt.take() { + if peer_state_opt.is_some() { + let mut peer_state_lock = peer_state_opt.unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { let counterparty_node_id = chan.get().get_counterparty_node_id(); @@ -4035,7 +4087,7 @@ where let update_id = monitor_update.update_id; let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update); let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, - peer_state, chan); + peer_state, per_peer_state, chan); if let Err(e) = res { // TODO: This is a *critical* error - we probably updated the outbound edge // of the HTLC's monitor with a preimage. We should retry this monitor @@ -4205,7 +4257,7 @@ where } fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock let counterparty_node_id = match counterparty_node_id { Some(cp_id) => cp_id.clone(), @@ -4236,7 +4288,7 @@ where if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { return; } - handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut()); + handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); } /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`]. 
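The `channel_monitor_updated` hunk above now asserts that its caller already holds the `total_consistency_lock` read lock instead of taking a persistence guard itself. A self-contained sketch of that assertion pattern, using a hypothetical `Manager` type and a plain `std::sync::RwLock` standing in for the real lock:

use std::sync::RwLock;

struct Manager { total_consistency_lock: RwLock<()> }

impl Manager {
    fn monitor_updated(&self) {
        // If try_write() succeeds, no guard is outstanding - the caller forgot to take the lock.
        debug_assert!(self.total_consistency_lock.try_write().is_err(), "caller must hold the read lock");
        // ... handle the completed monitor update here ...
    }
    fn caller(&self) {
        let _read_guard = self.total_consistency_lock.read().unwrap();
        self.monitor_updated();
    }
}

fn main() {
    Manager { total_consistency_lock: RwLock::new(()) }.caller();
}

Note that the `try_write()` probe also fails while an unrelated writer holds the lock, so this is a debug-time heuristic rather than a hard guarantee.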
@@ -4284,11 +4336,13 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty()); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let is_only_peer_channel = peer_state.channel_by_id.len() == 1; match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut channel) => { if !channel.get().inbound_is_awaiting_accept() { @@ -4306,6 +4360,21 @@ where peer_state.pending_msg_events.push(send_msg_err_event); let _ = remove_channel!(self, channel); return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); + } else { + // If this peer already has some channels, a new channel won't increase our number of peers + // with unfunded channels, so as long as we aren't over the maximum number of unfunded + // channels per-peer we can accept channels from a peer with existing ones. + if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.get().get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } + } + }; + peer_state.pending_msg_events.push(send_msg_err_event); + let _ = remove_channel!(self, channel); + return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() }); + } } peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { @@ -4320,6 +4389,43 @@ where Ok(()) } + /// Gets the number of peers which match the given filter and do not have any funded, outbound, + /// or 0-conf channels. + /// + /// The filter is called for each peer and provided with the number of unfunded, inbound, and + /// non-0-conf channels we have with the peer. 
+ fn peers_without_funded_channels(&self, maybe_count_peer: Filter) -> usize + where Filter: Fn(&PeerState<::Signer>) -> bool { + let mut peers_without_funded_channels = 0; + let best_block_height = self.best_block.read().unwrap().height(); + { + let peer_state_lock = self.per_peer_state.read().unwrap(); + for (_, peer_mtx) in peer_state_lock.iter() { + let peer = peer_mtx.lock().unwrap(); + if !maybe_count_peer(&*peer) { continue; } + let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height); + if num_unfunded_channels == peer.channel_by_id.len() { + peers_without_funded_channels += 1; + } + } + } + return peers_without_funded_channels; + } + + fn unfunded_channel_count( + peer: &PeerState<::Signer>, best_block_height: u32 + ) -> usize { + let mut num_unfunded_channels = 0; + for (_, chan) in peer.channel_by_id.iter() { + if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && + chan.get_funding_tx_confirmations(best_block_height) == 0 + { + num_unfunded_channels += 1; + } + } + num_unfunded_channels + } + fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { if msg.chain_hash != self.genesis_hash { return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); @@ -4332,8 +4438,13 @@ where let mut random_bytes = [0u8; 16]; random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); - let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); + + // Get the number of peers with channels, but without funded ones. We don't care too much + // about peers that never open a channel, so we filter by peers that have at least one + // channel, and then limit the number of those with unfunded channels. + let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty()); + let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -4342,9 +4453,29 @@ where })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + + // If this peer already has some channels, a new channel won't increase our number of peers + // with unfunded channels, so as long as we aren't over the maximum number of unfunded + // channels per-peer we can accept channels from a peer with existing ones. 
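As a condensed restatement of the rule in the comment above (and of the gate the next hunk adds to `internal_open_channel`): the parameter names below are invented for illustration; the real code derives these values from `PeerState`, `self.default_configuration`, and the `MAX_UNFUNDED_CHANNEL_PEERS` constant defined elsewhere in this file.

/// Illustrative only - mirrors the inbound-channel gate, not LDK's actual function.
fn should_reject_new_inbound_channel(
    peer_has_no_channels_yet: bool,
    peers_with_only_unfunded_channels: usize,
    max_unfunded_channel_peers: usize,
    manually_accept_inbound_channels: bool,
) -> bool {
    // A peer that already has channels with us cannot increase the number of peers with
    // only unfunded channels, so the limit applies only to a peer's first channel, and
    // only when the user has not opted into manual channel acceptance.
    peer_has_no_channels_yet
        && peers_with_only_unfunded_channels >= max_unfunded_channel_peers
        && !manually_accept_inbound_channels
}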
+ if peer_state.channel_by_id.is_empty() && + channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS && + !self.default_configuration.manually_accept_inbound_channels + { + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "Have too many peers with unfunded channels, not accepting new ones".to_owned(), + msg.temporary_channel_id.clone())); + } + + let best_block_height = self.best_block.read().unwrap().height(); + if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER { + return Err(MsgHandleErrInternal::send_err_msg_no_close( + format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER), + msg.temporary_channel_id.clone())); + } + let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, - counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, - self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias) + counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, + &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias) { Err(e) => { self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); @@ -4415,60 +4546,31 @@ where } fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { + let best_block = *self.best_block.read().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id) })?; - let ((funding_msg, monitor, mut channel_ready), mut chan) = { - let best_block = *self.best_block.read().unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let ((funding_msg, monitor), chan) = match peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) - } - }; - // Because we have exclusive ownership of the channel here we can release the peer_state - // lock before watch_channel - match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) { - ChannelMonitorUpdateStatus::Completed => {}, - ChannelMonitorUpdateStatus::PermanentFailure => { - // Note that we reply with the new channel_id in error messages if we gave up on the - // channel, not the temporary_channel_id. This is compatible with ourselves, but the - // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for - // any messages referencing a previously-closed channel anyway. 
- // We do not propagate the monitor update to the user as it would be for a monitor - // that we didn't manage to store (and that we don't care about - we don't respond - // with the funding_signed so the channel can never go on chain). - let (_monitor_update, failed_htlcs) = chan.force_shutdown(false); - assert!(failed_htlcs.is_empty()); - return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id)); - }, - ChannelMonitorUpdateStatus::InProgress => { - // There's no problem signing a counterparty's funding transaction if our monitor - // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't - // accepted payment from yet. We do, however, need to wait to send our channel_ready - // until we have persisted our monitor. - chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new()); - channel_ready = None; // Don't send the channel_ready now - }, - } - // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the - // peer exists, despite the inner PeerState potentially having no channels after removing - // the channel above. - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + }; + match peer_state.channel_by_id.entry(funding_msg.channel_id) { hash_map::Entry::Occupied(_) => { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) + Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(e) => { - let mut id_to_peer = self.id_to_peer.lock().unwrap(); - match id_to_peer.entry(chan.channel_id()) { + match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) { hash_map::Entry::Occupied(_) => { return Err(MsgHandleErrInternal::send_err_msg_no_close( "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(), @@ -4478,63 +4580,67 @@ where i_e.insert(chan.get_counterparty_node_id()); } } + + // There's no problem signing a counterparty's funding transaction if our monitor + // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't + // accepted payment from yet. We do, however, need to wait to send our channel_ready + // until we have persisted our monitor. + let new_channel_id = funding_msg.channel_id; peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { node_id: counterparty_node_id.clone(), msg: funding_msg, }); - if let Some(msg) = channel_ready { - send_channel_ready!(self, peer_state.pending_msg_events, chan, msg); + + let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); + + let chan = e.insert(chan); + let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, + per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) }); + + // Note that we reply with the new channel_id in error messages if we gave up on the + // channel, not the temporary_channel_id. This is compatible with ourselves, but the + // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for + // any messages referencing a previously-closed channel anyway. 
+ // We do not propagate the monitor update to the user as it would be for a monitor + // that we didn't manage to store (and that we don't care about - we don't respond + // with the funding_signed so the channel can never go on chain). + if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res { + res.0 = None; } - e.insert(chan); + res } } - Ok(()) } fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { - let funding_tx = { - let best_block = *self.best_block.read().unwrap(); - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) - })?; + let best_block = *self.best_block.read().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) + })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) { - Ok(update) => update, - Err(e) => try_chan_entry!(self, Err(e), chan), - }; - match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); - if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { - // We weren't able to watch the channel to begin with, so no updates should be made on - // it. Previously, full_stack_target found an (unreachable) panic when the - // monitor update contained within `shutdown_finish` was applied. - if let Some((ref mut shutdown_finish, _)) = shutdown_finish { - shutdown_finish.0.take(); - } - } - return res - }, - } - if let Some(msg) = channel_ready { - send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let monitor = try_chan_entry!(self, + chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan); + let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor); + let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. 
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); } - funding_tx - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) - } - }; - log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid()); - self.tx_broadcaster.broadcast_transaction(&funding_tx); - Ok(()) + } + res + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } } fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { @@ -4618,7 +4724,7 @@ where if let Some(monitor_update) = monitor_update_opt { let update_id = monitor_update.update_id; let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update); - break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry); + break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry); } break Ok(()); }, @@ -4810,7 +4916,7 @@ where let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); let update_id = monitor_update.update_id; handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, - peer_state, chan) + peer_state, per_peer_state, chan) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } @@ -4819,7 +4925,7 @@ where #[inline] fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) { for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { - let mut forward_event = None; + let mut push_forward_event = false; let mut new_intercept_events = Vec::new(); let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { @@ -4877,7 +4983,7 @@ where // We don't want to generate a PendingHTLCsForwardable event if only intercepted // payments are being processed. if forward_htlcs_empty { - forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); + push_forward_event = true; } entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }))); @@ -4895,28 +5001,32 @@ where let mut events = self.pending_events.lock().unwrap(); events.append(&mut new_intercept_events); } + if push_forward_event { self.push_pending_forwards_ev() } + } + } - match forward_event { - Some(time) => { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PendingHTLCsForwardable { - time_forwardable: time - }); - } - None => {}, - } + // We only want to push a PendingHTLCsForwardable event if no others are queued. + fn push_pending_forwards_ev(&self) { + let mut pending_events = self.pending_events.lock().unwrap(); + let forward_ev_exists = pending_events.iter() + .find(|ev| if let events::Event::PendingHTLCsForwardable { .. 
} = ev { true } else { false }) + .is_some(); + if !forward_ev_exists { + pending_events.push(events::Event::PendingHTLCsForwardable { + time_forwardable: + Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), + }); } } fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { let (htlcs_to_fail, res) = { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) + let mut peer_state_lock = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) - })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + }).map(|mtx| mtx.lock().unwrap())?; let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { @@ -4924,8 +5034,8 @@ where let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); let update_id = monitor_update.update_id; - let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, - peer_state, chan); + let res = handle_new_monitor_update!(self, update_res, update_id, + peer_state_lock, peer_state, per_peer_state, chan); (htlcs_to_fail, res) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -5085,8 +5195,10 @@ where Ok(()) } - /// Process pending events from the `chain::Watch`, returning whether any events were processed. + /// Process pending events from the [`chain::Watch`], returning whether any events were processed. fn process_pending_monitor_events(&self) -> bool { + debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock + let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); @@ -5164,7 +5276,13 @@ where /// update events as a separate process method here. #[cfg(fuzzing)] pub fn process_monitor_events(&self) { - self.process_pending_monitor_events(); + PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { + if self.process_pending_monitor_events() { + NotifyOption::DoPersist + } else { + NotifyOption::SkipPersist + } + }); } /// Check the holding cell in each channel and free any pending HTLCs in them if possible. 
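The `push_pending_forwards_ev` helper added above queues a `PendingHTLCsForwardable` event only when none is already pending, replacing the old per-failure `forward_event` timestamps. A standalone sketch of that enqueue-if-absent pattern with a simplified event type and queue (names and types here are illustrative, not LDK's):

use std::sync::Mutex;
use std::time::Duration;

enum Event { PendingHTLCsForwardable { time_forwardable: Duration }, Other }

fn push_pending_forwards_ev(pending_events: &Mutex<Vec<Event>>, holding_cell_delay_ms: u64) {
    let mut events = pending_events.lock().unwrap();
    // Only queue a forwardable event if none is pending; duplicates would just make the
    // user call `process_pending_htlc_forwards` more often than necessary.
    let already_queued = events.iter()
        .any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }));
    if !already_queued {
        events.push(Event::PendingHTLCsForwardable {
            time_forwardable: Duration::from_millis(holding_cell_delay_ms),
        });
    }
}

fn main() {
    let queue = Mutex::new(vec![Event::Other]);
    push_pending_forwards_ev(&queue, 100);
    push_pending_forwards_ev(&queue, 100); // second call is a no-op
    assert_eq!(queue.lock().unwrap().len(), 2);
}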
@@ -5174,38 +5292,45 @@ where let mut has_monitor_update = false; let mut failed_htlcs = Vec::new(); let mut handle_errors = Vec::new(); - let per_peer_state = self.per_peer_state.read().unwrap(); - for (_cp_id, peer_state_mutex) in per_peer_state.iter() { - 'chan_loop: loop { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state: &mut PeerState<_> = &mut *peer_state_lock; - for (channel_id, chan) in peer_state.channel_by_id.iter_mut() { - let counterparty_node_id = chan.get_counterparty_node_id(); - let funding_txo = chan.get_funding_txo(); - let (monitor_opt, holding_cell_failed_htlcs) = - chan.maybe_free_holding_cell_htlcs(&self.logger); - if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id)); - } - if let Some(monitor_update) = monitor_opt { - has_monitor_update = true; - - let update_res = self.chain_monitor.update_channel( - funding_txo.expect("channel is live"), monitor_update); - let update_id = monitor_update.update_id; - let channel_id: [u8; 32] = *channel_id; - let res = handle_new_monitor_update!(self, update_res, update_id, - peer_state_lock, peer_state, chan, MANUALLY_REMOVING, - peer_state.channel_by_id.remove(&channel_id)); - if res.is_err() { - handle_errors.push((counterparty_node_id, res)); + // Walk our list of channels and find any that need to update. Note that when we do find an + // update, if it includes actions that must be taken afterwards, we have to drop the + // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we + // manage to go through all our peers without finding a single channel to update. + 'peer_loop: loop { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + 'chan_loop: loop { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state: &mut PeerState<_> = &mut *peer_state_lock; + for (channel_id, chan) in peer_state.channel_by_id.iter_mut() { + let counterparty_node_id = chan.get_counterparty_node_id(); + let funding_txo = chan.get_funding_txo(); + let (monitor_opt, holding_cell_failed_htlcs) = + chan.maybe_free_holding_cell_htlcs(&self.logger); + if !holding_cell_failed_htlcs.is_empty() { + failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id)); + } + if let Some(monitor_update) = monitor_opt { + has_monitor_update = true; + + let update_res = self.chain_monitor.update_channel( + funding_txo.expect("channel is live"), monitor_update); + let update_id = monitor_update.update_id; + let channel_id: [u8; 32] = *channel_id; + let res = handle_new_monitor_update!(self, update_res, update_id, + peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING, + peer_state.channel_by_id.remove(&channel_id)); + if res.is_err() { + handle_errors.push((counterparty_node_id, res)); + } + continue 'peer_loop; } - continue 'chan_loop; } + break 'chan_loop; } - break 'chan_loop; } + break 'peer_loop; } let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); @@ -6168,13 +6293,13 @@ where let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id); } - fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) { + fn peer_disconnected(&self, counterparty_node_id: &PublicKey) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, 
&self.persistence_notifier); let mut failed_channels = Vec::new(); let mut per_peer_state = self.per_peer_state.write().unwrap(); let remove_peer = { - log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.", - log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); + log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.", + log_pubkey!(counterparty_node_id)); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6216,7 +6341,7 @@ where debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); peer_state.is_connected = false; peer_state.ok_to_remove(true) - } else { true } + } else { debug_assert!(false, "Unconnected peer disconnected"); true } }; if remove_peer { per_peer_state.remove(counterparty_node_id); @@ -6228,20 +6353,28 @@ where } } - fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> { + fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> { if !init_msg.features.supports_static_remote_key() { - log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id)); + log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); return Err(()); } - log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + // If we have too many peers connected which don't have funded channels, disconnect the + // peer immediately (as long as it doesn't have funded channels). If we have a bunch of + // unfunded channels taking up space in memory for disconnected peers, we still let new + // peers connect, but we'll reject new channels from them. 
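A condensed sketch of the connection-time policy described in the comment above and implemented just below (parameter names are invented; the real check combines the `inbound` flag with `peers_without_funded_channels` and the `MAX_NO_CHANNEL_PEERS` constant defined elsewhere in this file):

/// Illustrative only - not LDK's actual function.
fn inbound_peer_limited(
    inbound: bool,
    connected_peers_without_funded_channels: usize,
    max_no_channel_peers: usize,
) -> bool {
    // Outbound connections are always allowed; inbound connections are refused once too
    // many already-connected peers have no funded channels with us (an entirely new peer
    // is rejected outright, an existing one only if all of its channels are unfunded).
    inbound && connected_peers_without_funded_channels >= max_no_channel_peers
}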
+ let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected); + let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS; + { let mut peer_state_lock = self.per_peer_state.write().unwrap(); match peer_state_lock.entry(counterparty_node_id.clone()) { hash_map::Entry::Vacant(e) => { + if inbound_peer_limited { + return Err(()); + } e.insert(Mutex::new(PeerState { channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), @@ -6253,14 +6386,24 @@ where hash_map::Entry::Occupied(e) => { let mut peer_state = e.get().lock().unwrap(); peer_state.latest_features = init_msg.features.clone(); + + let best_block_height = self.best_block.read().unwrap().height(); + if inbound_peer_limited && + Self::unfunded_channel_count(&*peer_state, best_block_height) == + peer_state.channel_by_id.len() + { + return Err(()); + } + debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice"); peer_state.is_connected = true; }, } } - let per_peer_state = self.per_peer_state.read().unwrap(); + log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); + let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6379,8 +6522,8 @@ pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelType /// [`ChannelManager`]. pub fn provided_init_features(_config: &UserConfig) -> InitFeatures { // Note that if new features are added here which other peers may (eventually) require, we - // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for - // ErroringMessageHandler. + // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for + // [`ErroringMessageHandler`]. let mut features = InitFeatures::empty(); features.set_data_loss_protect_optional(); features.set_upfront_shutdown_script_optional(); @@ -6454,6 +6597,7 @@ impl Writeable for ChannelDetails { (33, self.inbound_htlc_minimum_msat, option), (35, self.inbound_htlc_maximum_msat, option), (37, user_channel_id_high_opt, option), + (39, self.feerate_sat_per_1000_weight, option), }); Ok(()) } @@ -6489,6 +6633,7 @@ impl Readable for ChannelDetails { (33, inbound_htlc_minimum_msat, option), (35, inbound_htlc_maximum_msat, option), (37, user_channel_id_high_opt, option), + (39, feerate_sat_per_1000_weight, option), }); // `user_channel_id` used to be a single u64 value. 
In order to remain backwards compatible with @@ -6522,6 +6667,7 @@ impl Readable for ChannelDetails { is_public: is_public.0.unwrap(), inbound_htlc_minimum_msat, inbound_htlc_maximum_msat, + feerate_sat_per_1000_weight, }) } } @@ -6658,7 +6804,7 @@ impl Writeable for ClaimableHTLC { impl Readable for ClaimableHTLC { fn read(reader: &mut R) -> Result { - let mut prev_hop = crate::util::ser::OptionDeserWrapper(None); + let mut prev_hop = crate::util::ser::RequiredWrapper(None); let mut value = 0; let mut payment_data: Option = None; let mut cltv_expiry = 0; @@ -6708,32 +6854,40 @@ impl Readable for HTLCSource { let id: u8 = Readable::read(reader)?; match id { 0 => { - let mut session_priv: crate::util::ser::OptionDeserWrapper = crate::util::ser::OptionDeserWrapper(None); + let mut session_priv: crate::util::ser::RequiredWrapper = crate::util::ser::RequiredWrapper(None); let mut first_hop_htlc_msat: u64 = 0; - let mut path = Some(Vec::new()); + let mut path: Option> = Some(Vec::new()); let mut payment_id = None; let mut payment_secret = None; - let mut payment_params = None; + let mut payment_params: Option = None; read_tlv_fields!(reader, { (0, session_priv, required), (1, payment_id, option), (2, first_hop_htlc_msat, required), (3, payment_secret, option), (4, path, vec_type), - (5, payment_params, option), + (5, payment_params, (option: ReadableArgs, 0)), }); if payment_id.is_none() { // For backwards compat, if there was no payment_id written, use the session_priv bytes // instead. payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref())); } + if path.is_none() || path.as_ref().unwrap().is_empty() { + return Err(DecodeError::InvalidValue); + } + let path = path.unwrap(); + if let Some(params) = payment_params.as_mut() { + if params.final_cltv_expiry_delta == 0 { + params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta; + } + } Ok(HTLCSource::OutboundRoute { session_priv: session_priv.0.unwrap(), first_hop_htlc_msat, - path: path.unwrap(), + path, payment_id: payment_id.unwrap(), payment_secret, - payment_params, }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -6745,7 +6899,7 @@ impl Readable for HTLCSource { impl Writeable for HTLCSource { fn write(&self, writer: &mut W) -> Result<(), crate::io::Error> { match self { - HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => { + HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret } => { 0u8.write(writer)?; let payment_id_opt = Some(payment_id); write_tlv_fields!(writer, { @@ -6754,7 +6908,7 @@ impl Writeable for HTLCSource { (2, first_hop_htlc_msat, required), (3, payment_secret, option), (4, *path, vec_type), - (5, payment_params, option), + (5, None::, option), // payment_params in LDK versions prior to 0.0.115 }); } HTLCSource::PreviousHopData(ref field) => { @@ -6877,7 +7031,10 @@ where let mut monitor_update_blocked_actions_per_peer = None; let mut peer_states = Vec::new(); for (_, peer_state_mutex) in per_peer_state.iter() { - peer_states.push(peer_state_mutex.lock().unwrap()); + // Because we're holding the owning `per_peer_state` write lock here there's no chance + // of a lockorder violation deadlock - no other thread can be holding any + // per_peer_state lock at all. 
+ peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self()); } (serializable_peer_count).write(writer)?; @@ -7081,7 +7238,7 @@ where /// In such cases the latest local transactions will be sent to the tx_broadcaster included in /// this struct. /// - /// (C-not exported) because we have no HashMap bindings + /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap::Signer>>, } @@ -7369,6 +7526,10 @@ where probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes()); } + if !channel_closures.is_empty() { + pending_events_read.append(&mut channel_closures); + } + if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { pending_outbound_payments = Some(pending_outbound_payments_compat); } else if pending_outbound_payments.is_none() { @@ -7377,7 +7538,13 @@ where outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); } pending_outbound_payments = Some(outbounds); - } else { + } + let pending_outbounds = OutboundPayments { + pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), + retry_lock: Mutex::new(()) + }; + + { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state // (i.e. those for which we just force-closed above or we otherwise don't have a @@ -7388,16 +7555,17 @@ where // 0.0.102+ for (_, monitor) in args.channel_monitors.iter() { if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { - for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() { + for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() { if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source { if path.is_empty() { log_error!(args.logger, "Got an empty path for a pending payment"); return Err(DecodeError::InvalidValue); } + let path_amt = path.last().unwrap().fee_msat; let mut session_priv_bytes = [0; 32]; session_priv_bytes[..].copy_from_slice(&session_priv[..]); - match pending_outbound_payments.as_mut().unwrap().entry(payment_id) { + match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) { hash_map::Entry::Occupied(mut entry) => { let newly_added = entry.get_mut().insert(session_priv_bytes, &path); log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", @@ -7424,48 +7592,65 @@ where } } } - for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() { - if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint && - info.prev_htlc_id == prev_hop_data.htlc_id - }; - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs` or - // `pending_intercepted_htlcs`, we were apparently not persisted after - // the monitor was when forwarding the payment. 
- forward_htlcs.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); - false + for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() { + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint && + info.prev_htlc_id == prev_hop_data.htlc_id + }; + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs` or + // `pending_intercepted_htlcs`, we were apparently not persisted after + // the monitor was when forwarding the payment. + forward_htlcs.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + false + } else { true } } else { true } + }); + !forwards.is_empty() + }); + pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + pending_events_read.retain(|event| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { + intercepted_id != ev_id + } else { true } + }); + false } else { true } }); - !forwards.is_empty() - }); - pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); - pending_events_read.retain(|event| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); + }, + HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => { + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + // Note that we set `from_onchain` to "false" here, + // deliberately keeping the pending payment around forever. + // Given it should only occur when we have a channel we're + // force-closing for being stale that's okay. + // The alternative would be to wipe the state when claiming, + // generating a `PaymentPathSuccessful` event but regenerating + // it and the `PaymentSent` on every restart until the + // `ChannelMonitor` is removed. 
+ pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger); + pending_events_read = pending_events.into_inner().unwrap(); + } + }, } } } } } - if !forward_htlcs.is_empty() { + if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() { // If we have pending HTLCs to forward, assume we either dropped a // `PendingHTLCsForwardable` or the user received it but never processed it as they // shut down before the timer hit. Either way, set the time_forwardable to a small @@ -7522,10 +7707,6 @@ where let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes()); - if !channel_closures.is_empty() { - pending_events_read.append(&mut channel_closures); - } - let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { Ok(key) => key, Err(()) => return Err(DecodeError::InvalidValue) @@ -7642,7 +7823,7 @@ where inbound_payment_key: expanded_inbound_key, pending_inbound_payments: Mutex::new(pending_inbound_payments), - pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()), }, + pending_outbound_payments: pending_outbounds, pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), forward_htlcs: Mutex::new(forward_htlcs), @@ -7728,8 +7909,10 @@ mod tests { // to connect messages with new values chan.0.contents.fee_base_msat *= 2; chan.1.contents.fee_base_msat *= 2; - let node_a_chan_info = nodes[0].node.list_channels()[0].clone(); - let node_b_chan_info = nodes[1].node.list_channels()[0].clone(); + let node_a_chan_info = nodes[0].node.list_channels_with_counterparty( + &nodes[1].node.get_our_node_id()).pop().unwrap(); + let node_b_chan_info = nodes[1].node.list_channels_with_counterparty( + &nodes[0].node.get_our_node_id()).pop().unwrap(); // The first two nodes (which opened a channel) should now require fresh persistence assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); @@ -7805,7 +7988,7 @@ mod tests { // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap(); - nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); + nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7835,7 +8018,7 @@ mod tests { expect_payment_failed!(nodes[0], our_payment_hash, true); // Send the second half of the original MPP payment. 
- nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); + nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7927,7 +8110,6 @@ mod tests { let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV), final_value_msat: 100_000, - final_cltv_expiry_delta: TEST_FINAL_CLTV, }; let route = find_route( &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph, @@ -8018,7 +8200,6 @@ mod tests { let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), final_value_msat: 10_000, - final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph.clone(); let first_hops = nodes[0].node.list_usable_channels(); @@ -8043,7 +8224,7 @@ mod tests { assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1); } #[test] @@ -8061,7 +8242,6 @@ mod tests { let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), final_value_msat: 10_000, - final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph.clone(); let first_hops = nodes[0].node.list_usable_channels(); @@ -8087,7 +8267,7 @@ mod tests { assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1); } #[test] @@ -8115,7 +8295,8 @@ mod tests { match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() { PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { - assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) }, + assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) + }, _ => panic!("unexpected error") } } @@ -8129,8 +8310,8 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[0], true); @@ -8175,7 +8356,7 @@ mod tests { match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, 
&nodes[0].node.inbound_payment_key, &nodes[0].logger) { Ok(_) => panic!("Unexpected ok"), Err(()) => { - nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1); } } @@ -8215,10 +8396,10 @@ mod tests { let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); assert!(nodes_0_lock.contains_key(channel_id)); - - assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); } + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); @@ -8226,7 +8407,9 @@ mod tests { let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); assert!(nodes_0_lock.contains_key(channel_id)); + } + { // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as // as it has the funding transaction. let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); @@ -8256,7 +8439,9 @@ mod tests { let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); assert!(nodes_0_lock.contains_key(channel_id)); + } + { // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature // from `nodes[0]` for the closing transaction with the proposed fee, the channel is @@ -8350,6 +8535,213 @@ mod tests { check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key); } + #[test] + fn test_connection_limiting() { + // Test that we limit un-channel'd peers and un-funded channels properly. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Note that create_network connects the nodes together for us + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap(); + let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + let mut funding_tx = None; + for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER { + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + + if idx == 0 { + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel); + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); + funding_tx = Some(tx.clone()); + nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap(); + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + check_added_monitors!(nodes[1], 1); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); + check_added_monitors!(nodes[0], 1); + } + open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + } + + // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected + open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes(); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); + assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, + open_channel_msg.temporary_channel_id); + + // Further, because all of our channels with nodes[0] are inbound, and none of them funded, + // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS + // limit. + let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS); + for _ in 1..super::MAX_NO_CHANNEL_PEERS { + let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx, + &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap()); + peer_pks.push(random_pk); + nodes[1].node.peer_connected(&random_pk, &msgs::Init { + features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap(); + } + let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx, + &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap()); + nodes[1].node.peer_connected(&last_random_pk, &msgs::Init { + features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err(); + + // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from + // them if we have too many un-channel'd peers. 
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
+ for ev in chan_closed_events {
+ if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
+ }
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+ // but of course if the connection is outbound, it's allowed...
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
+ // Even though we accept one more connection from new peers, we won't actually let them
+ // open channels.
+ assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
+ for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+ nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // Of course, outbound channels are always allowed.
+ nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
+
+ // If we fund the first channel, nodes[0] has a live on-chain channel with us, so it is now
+ // "protected" and can connect again.
+ mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ // Further, because the first channel was funded, we can open another channel with
+ // last_random_pk.
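+ // (Once its channel is funded on-chain, nodes[0] presumably no longer counts towards
+ // MAX_UNFUNDED_CHANNEL_PEERS, freeing up the slot that last_random_pk's channel needs.)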
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+ }
+
+ #[test]
+ fn test_outbound_chans_unlimited() {
+ // Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Note that create_network connects the nodes together for us
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+
+ // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
+ // rejected.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // but we can still open an outbound channel.
+ nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+
+ // but even with such an outbound channel, additional inbound channels will still fail.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+ }
+
+ #[test]
+ fn test_0conf_limiting() {
+ // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
+ // flag set and (sometimes) accept channels as 0conf.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut settings = test_default_channel_config();
+ settings.manually_accept_inbound_channels = true;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Note that create_network connects the nodes together for us
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
+ for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+ let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+
+ nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+
+ // If we try to accept a channel from another peer without marking it 0conf, it will fail.
+ let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
+ Err(APIError::APIMisuseError { err }) =>
+ assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
+ _ => panic!(),
+ }
+ }
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // ...however if we accept the same channel 0conf, it should work just fine.
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+ }
+
#[cfg(anchors)]
#[test]
fn test_anchors_zero_fee_htlc_tx_fallback() {
@@ -8377,7 +8769,7 @@ mod tests {
_ => panic!("Unexpected event"),
}
- let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -8430,13 +8822,12 @@ pub mod bench {
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
let network = bitcoin::Network::Testnet; - let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash(); let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))}; let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; let logger_a = test_utils::TestLogger::with_id("node a".to_owned()); let scorer = Mutex::new(test_utils::TestScorer::new()); - let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)), &scorer); + let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer); let mut config: UserConfig = Default::default(); config.channel_handshake_config.minimum_depth = 1; @@ -8446,7 +8837,7 @@ pub mod bench { let keys_manager_a = KeysManager::new(&seed_a, 42, 42); let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters { network, - best_block: BestBlock::from_genesis(network), + best_block: BestBlock::from_network(network), }); let node_a_holder = NodeHolder { node: &node_a }; @@ -8456,12 +8847,12 @@ pub mod bench { let keys_manager_b = KeysManager::new(&seed_b, 42, 42); let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters { network, - best_block: BestBlock::from_genesis(network), + best_block: BestBlock::from_network(network), }); let node_b_holder = NodeHolder { node: &node_b }; - node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap(); - node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap(); + node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap(); + node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap(); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap(); node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); @@ -8480,7 +8871,7 @@ pub mod bench { assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]); let block = Block { - header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, + header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, txdata: vec![tx], }; Listen::block_connected(&node_a, &block, 1); @@ -8519,7 +8910,7 @@ pub mod bench { _ => panic!("Unexpected event"), } - let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a); + let dummy_graph = NetworkGraph::new(network, &logger_a); let mut payment_count: u64 = 0; macro_rules! 
send_payment { @@ -8544,7 +8935,7 @@ pub mod bench { let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap()); $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]); $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg); - let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id()); + let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id()); $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa); $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs); $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id())); @@ -8563,7 +8954,7 @@ pub mod bench { _ => panic!("Failed to generate claim event"), } - let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id()); + let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id()); $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa); $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs); $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));