//! The top-level channel management and payment tracking stuff lives here.
//!
-//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
+//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, which HTLCs are in flight, and for
//! reestablishing those channels upon reconnect to the relevant peer(s).
//!
-//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing
+//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
-//!
-//! [`find_route`]: crate::routing::router::find_route
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner};
+use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::events;
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
+use alloc::collections::BTreeMap;
+
use crate::io;
use crate::prelude::*;
use core::{cmp, mem};
use core::ops::Deref;
// Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry};
+pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure};
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
Ok(InterceptId(buf))
}
}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
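+///
+/// A minimal sketch of the intended use (the `claimed_htlcs` map and `payment_preimage`
+/// value here are hypothetical, for illustration only):
+///
+/// ```ignore
+/// let id = SentHTLCId::from_source(&htlc_source);
+/// claimed_htlcs.insert(id, payment_preimage);
+/// ```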
+pub(crate) enum SentHTLCId {
+ PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+ OutboundRoute { session_priv: SecretKey },
+}
+impl SentHTLCId {
+ pub(crate) fn from_source(source: &HTLCSource) -> Self {
+ match source {
+ HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
+ short_channel_id: hop_data.short_channel_id,
+ htlc_id: hop_data.htlc_id,
+ },
+ HTLCSource::OutboundRoute { session_priv, .. } =>
+ Self::OutboundRoute { session_priv: *session_priv },
+ }
+ }
+}
+impl_writeable_tlv_based_enum!(SentHTLCId,
+ (0, PreviousHopData) => {
+ (0, short_channel_id, required),
+ (2, htlc_id, required),
+ },
+ (2, OutboundRoute) => {
+ (0, session_priv, required),
+ };
+);
+
+
/// Tracks the inbound HTLC corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, PartialEq, Eq)]
first_hop_htlc_msat: u64,
payment_id: PaymentId,
payment_secret: Option<PaymentSecret>,
- /// Note that this is now "deprecated" - we write it for forwards (and read it for
- /// backwards) compatibility reasons, but prefer to use the data in the
- /// [`super::outbound_payment`] module, which stores per-payment data once instead of in
- /// each HTLC.
- payment_params: Option<PaymentParameters>,
},
}
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
0u8.hash(hasher);
prev_hop_data.hash(hasher);
},
- HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => {
+ HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat } => {
1u8.hash(hasher);
path.hash(hasher);
session_priv[..].hash(hasher);
payment_id.hash(hasher);
payment_secret.hash(hasher);
first_hop_htlc_msat.hash(hasher);
- payment_params.hash(hasher);
},
}
}
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
payment_secret: None,
- payment_params: None,
}
}
}
}
}
#[inline]
- fn ignore_no_close(err: String) -> Self {
- Self {
- err: LightningError {
- err,
- action: msgs::ErrorAction::IgnoreError,
- },
- chan_id: None,
- shutdown_finish: None,
- }
- }
- #[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
Self { err, chan_id: None, shutdown_finish: None }
}
ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
}
+#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
/// Indicates that a payment ultimately destined for us was claimed and we should emit an
/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
EmitEvent { event: events::Event },
}
+impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
+ (0, PaymentClaimed) => { (0, payment_hash, required) },
+ (2, EmitEvent) => { (0, event, upgradable_required) },
+);
+
/// State we hold per-peer.
pub(super) struct PeerState<Signer: ChannelSigner> {
/// `temporary_channel_id` or `channel_id` -> `channel`.
/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
+ /// Map from a specific channel to some action(s) that should be taken when all pending
+ /// [`ChannelMonitorUpdate`]s for the channel complete updating.
+ ///
+ /// Note that because we generally only have one entry here, a HashMap is pretty overkill. A
+ /// BTreeMap currently stores more than ten elements per leaf node, so even with up to a few
+ /// channels per peer this will just be one allocation, amounting to a linear list of
+ /// channels to walk and avoiding the whole hashing rigmarole.
+ ///
+ /// Note that the channel may no longer exist. For example, if a channel was closed but we
+ /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
+ /// for a missing channel. While a malicious peer could construct a second channel with the
+ /// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
+ /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
+ /// duplicates do not occur, so such channels should fail without a monitor update completing.
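+ ///
+ /// A usage sketch (the `chan_id` and `action` values are hypothetical; this map is
+ /// written elsewhere in this file in roughly this shape):
+ ///
+ /// ```ignore
+ /// peer_state.monitor_update_blocked_actions.entry(chan_id)
+ ///     .or_insert_with(Vec::new).push(action);
+ /// ```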
+ monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
/// The peer is currently connected (i.e. we've seen a
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
/// [`ChannelMessageHandler::peer_disconnected`]).
if require_disconnected && self.is_connected {
return false
}
- self.channel_by_id.len() == 0
+ self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
}
}
min_value_msat: Option<u64>,
}
-/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
-/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
+/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
+/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
-/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
-/// issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
+/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
///
/// (C-not exported) as Arcs don't make sense in bindings
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
Arc<L>
>;
-/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
-/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
+/// [`SimpleRefChannelManager`] is a type alias for a [`ChannelManager`] reference, and is the reference
+/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
-/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
+/// usage of `lightning-net-tokio` (since `tokio::spawn` requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases prevents
/// issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
///
/// (C-not exported) as Arcs don't make sense in bindings
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
-/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
+/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
/// to individual Channels.
///
-/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
+/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
-/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
-/// called funding_transaction_generated for outbound channels).
+/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
+/// called [`funding_transaction_generated`] for outbound channels) being closed.
///
-/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
-/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates
-/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
+/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
+/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
+/// returning from [`chain::Watch::watch_channel`]/[`update_channel`]; with `ChannelManager`s, writing updates
+/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
-/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
-/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
+/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
+/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
///
-/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
-/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
-/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
-/// block_connected() to step towards your best block) upon deserialization before using the
-/// object!
+/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
+/// tells you the last block hash which was connected. You should sync the manager to the current best chain tip before using it.
+/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
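+///
+/// A persistence round-trip sketch (assuming `manager`, `writer`, `reader`, and a
+/// [`ChannelManagerReadArgs`] value `read_args` are all initialized elsewhere):
+///
+/// ```ignore
+/// // Write the manager out-of-band whenever convenient:
+/// manager.write(&mut writer)?;
+/// // On restart, read it back along with the hash of the last connected block:
+/// let (block_hash, manager) =
+///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, read_args)?;
+/// ```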
///
-/// Note that ChannelManager is responsible for tracking liveness of its channels and generating
-/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
+/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
+/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
+/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
+///
+/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
+/// inbound channels without confirmed funding transactions. This may result in nodes which we do
+/// not have a channel with being unable to connect to us or open new channels with us if we have
+/// many peers with unfunded channels.
///
-/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
-/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
-/// essentially you should default to using a SimpleRefChannelManager, and use a
-/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
+/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
+/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
+/// never limited. Please ensure you limit the count of such channels yourself.
+///
+/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
+/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
+/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
+/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
+///
+/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
+/// [`funding_created`]: msgs::FundingCreated
+/// [`funding_transaction_generated`]: Self::funding_transaction_generated
+/// [`BlockHash`]: bitcoin::hash_types::BlockHash
+/// [`update_channel`]: chain::Watch::update_channel
+/// [`ChannelUpdate`]: msgs::ChannelUpdate
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`read`]: ReadableArgs::read
//
// Lock order:
// The tree structure below illustrates the lock order requirements for the different locks of the
/// [`OutboundPayments::remove_stale_resolved_payments`].
pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
+/// (inbound) ones. The number of peers with unfunded channels is limited separately in
+/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
+const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
+
+/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
+/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
+const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
+
+/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
+/// many peers we reject new (inbound) connections.
+const MAX_NO_CHANNEL_PEERS: usize = 250;
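+
+// Taken together, these constants imply admission checks of roughly the following
+// shape (a sketch only; the names below are illustrative and are not helpers
+// defined in this file):
+//
+//   if !peer_has_funded_channel && peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS {
+//       /* reject the inbound connection */
+//   }
+//   if unfunded_channels_with_peer >= MAX_UNFUNDED_CHANS_PER_PEER {
+//       /* reject the inbound `open_channel` */
+//   }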
+
/// Information needed for constructing an invoice route hint for this channel.
#[derive(Clone, Debug, PartialEq)]
pub struct CounterpartyForwardingInfo {
pub outbound_htlc_maximum_msat: Option<u64>,
}
-/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
+/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`].
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
pub fn get_outbound_payment_scid(&self) -> Option<u64> {
self.short_channel_id.or(self.outbound_scid_alias)
}
+
+ fn from_channel<Signer: WriteableEcdsaChannelSigner>(channel: &Channel<Signer>,
+ best_block_height: u32, latest_features: InitFeatures) -> Self {
+
+ let balance = channel.get_available_balances();
+ let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+ channel.get_holder_counterparty_selected_channel_reserve_satoshis();
+ ChannelDetails {
+ channel_id: channel.channel_id(),
+ counterparty: ChannelCounterparty {
+ node_id: channel.get_counterparty_node_id(),
+ features: latest_features,
+ unspendable_punishment_reserve: to_remote_reserve_satoshis,
+ forwarding_info: channel.counterparty_forwarding_info(),
+ // Ensures that we have actually received the `htlc_minimum_msat` value
+ // from the counterparty through the `OpenChannel` or `AcceptChannel`
+ // message (as they are always the first message from the counterparty).
+ // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
+ // default `0` value set by `Channel::new_outbound`.
+ outbound_htlc_minimum_msat: if channel.have_received_message() {
+ Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
+ outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+ },
+ funding_txo: channel.get_funding_txo(),
+ // Note that accept_channel (or open_channel) is always the first message, so
+ // `have_received_message` indicates that type negotiation has completed.
+ channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
+ short_channel_id: channel.get_short_channel_id(),
+ outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
+ inbound_scid_alias: channel.latest_inbound_scid_alias(),
+ channel_value_satoshis: channel.get_value_satoshis(),
+ unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat: balance.balance_msat,
+ inbound_capacity_msat: balance.inbound_capacity_msat,
+ outbound_capacity_msat: balance.outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+ user_channel_id: channel.get_user_id(),
+ confirmations_required: channel.minimum_depth(),
+ confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
+ force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
+ is_outbound: channel.is_outbound(),
+ is_channel_ready: channel.is_usable(),
+ is_usable: channel.is_live(),
+ is_public: channel.should_announce(),
+ inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
+ inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
+ config: Some(channel.config()),
+ }
+ }
}
/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
/// made before LDK version 0.0.104.
payment_hash: Option<PaymentHash>,
},
- /// After a payment is explicitly abandoned by calling [`ChannelManager::abandon_payment`], it
- /// is marked as abandoned until an [`Event::PaymentFailed`] is generated. A payment could also
- /// be marked as abandoned if pathfinding fails repeatedly or retries have been exhausted.
+ /// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
+ /// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
+ /// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
Abandoned {
/// Hash of the payment that we have given up trying to send.
payment_hash: PaymentHash,
}
}
-macro_rules! handle_monitor_update_res {
- ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
- match $err {
- ChannelMonitorUpdateStatus::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
- update_maps_on_chan_removal!($self, $chan);
- // TODO: $failed_fails is dropped here, which will cause other channels to hit the
- // chain in a confused state! We need to move them into the ChannelMonitor which
- // will be responsible for failing backwards once things confirm on-chain.
- // It's ok that we drop $failed_forwards here - at this point we'd rather they
- // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
- // us bother trying to claim it just to forward on to another peer. If we're
- // splitting hairs we'd prefer to claim payments that were to us, but we haven't
- // given up the preimage yet, so might as well just wait until the payment is
- // retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
- $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
- (res, true)
- },
- ChannelMonitorUpdateStatus::InProgress => {
- log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
- log_bytes!($chan_id[..]),
- if $resend_commitment && $resend_raa {
- match $action_type {
- RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
- RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
- }
- } else if $resend_commitment { "commitment" }
- else if $resend_raa { "RAA" }
- else { "nothing" },
- (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
- (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
- (&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
- if !$resend_commitment {
- debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
- }
- if !$resend_raa {
- debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
- }
- $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
- (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
- },
- ChannelMonitorUpdateStatus::Completed => {
- (Ok(()), false)
- },
- }
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
- let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
- if drop {
- $entry.remove_entry();
- }
- res
- } };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
- debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
- handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
- } };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
- };
-}
-
macro_rules! send_channel_ready {
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
}
}
+macro_rules! handle_monitor_update_completion {
+ ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+ let mut updates = $chan.monitor_updating_restored(&$self.logger,
+ &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+ $self.best_block.read().unwrap().height());
+ let counterparty_node_id = $chan.get_counterparty_node_id();
+ let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+ // We only send a channel_update in the case where we are just now sending a
+ // channel_ready and the channel is in a usable state. We may re-send a
+ // channel_update later through the announcement_signatures process for public
+ // channels, but there's no reason not to just inform our counterparty of our fees
+ // now.
+ if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
+ Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: counterparty_node_id,
+ msg,
+ })
+ } else { None }
+ } else { None };
+
+ let update_actions = $peer_state.monitor_update_blocked_actions
+ .remove(&$chan.channel_id()).unwrap_or(Vec::new());
+
+ let htlc_forwards = $self.handle_channel_resumption(
+ &mut $peer_state.pending_msg_events, $chan, updates.raa,
+ updates.commitment_update, updates.order, updates.accepted_htlcs,
+ updates.funding_broadcastable, updates.channel_ready,
+ updates.announcement_sigs);
+ if let Some(upd) = channel_update {
+ $peer_state.pending_msg_events.push(upd);
+ }
+
+ let channel_id = $chan.channel_id();
+ core::mem::drop($peer_state_lock);
+ core::mem::drop($per_peer_state_lock);
+
+ $self.handle_monitor_update_completion_actions(update_actions);
+
+ if let Some(forwards) = htlc_forwards {
+ $self.forward_htlcs(&mut [forwards][..]);
+ }
+ $self.finalize_claims(updates.finalized_claimed_htlcs);
+ for failure in updates.failed_htlcs.drain(..) {
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+ $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+ }
+ } }
+}
+
+macro_rules! handle_new_monitor_update {
+ ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+ // update_maps_on_chan_removal needs to be able to take the id_to_peer lock, so make sure
+ // we can take it here in any case, to avoid a deadlock later.
+ debug_assert!($self.id_to_peer.try_lock().is_ok());
+ match $update_res {
+ ChannelMonitorUpdateStatus::InProgress => {
+ log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+ log_bytes!($chan.channel_id()[..]));
+ Ok(())
+ },
+ ChannelMonitorUpdateStatus::PermanentFailure => {
+ log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
+ log_bytes!($chan.channel_id()[..]));
+ update_maps_on_chan_removal!($self, $chan);
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+ "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
+ $chan.get_user_id(), $chan.force_shutdown(false),
+ $self.get_channel_update_for_broadcast(&$chan).ok()));
+ $remove;
+ res
+ },
+ ChannelMonitorUpdateStatus::Completed => {
+ if ($update_id == 0 || $chan.get_next_monitor_update()
+ .expect("We can't be processing a monitor update if it isn't queued")
+ .update_id == $update_id) &&
+ $chan.get_latest_monitor_update_id() == $update_id
+ {
+ handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+ }
+ Ok(())
+ },
+ }
+ } };
+ ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+ handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+ }
+}
+
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
R::Target: Router,
L::Target: Logger,
{
- /// Constructs a new ChannelManager to hold several channels and route between them.
+ /// Constructs a new `ChannelManager` to hold several channels and route between them.
///
/// This is the main "logic hub" for all channel-related actions, and implements
- /// ChannelMessageHandler.
+ /// [`ChannelMessageHandler`].
///
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
- /// Users need to notify the new ChannelManager when a new block is connected or
- /// disconnected using its `block_connected` and `block_disconnected` methods, starting
- /// from after `params.latest_hash`.
+ /// Users need to notify the new `ChannelManager` when a new block is connected or
+ /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
+ /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
+ /// more details.
+ ///
+ /// [`block_connected`]: chain::Listen::block_connected
+ /// [`block_disconnected`]: chain::Listen::block_disconnected
+ /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
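+ ///
+ /// A construction sketch mirroring the parameter list below (all arguments assumed
+ /// to be initialized elsewhere):
+ ///
+ /// ```ignore
+ /// let channel_manager = ChannelManager::new(fee_est, chain_monitor, tx_broadcaster,
+ ///     router, logger, entropy_source, node_signer, signer_provider,
+ ///     UserConfig::default(), chain_params);
+ /// ```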
pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
- let balance = channel.get_available_balances();
- let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
- channel.get_holder_counterparty_selected_channel_reserve_satoshis();
- res.push(ChannelDetails {
- channel_id: (*channel_id).clone(),
- counterparty: ChannelCounterparty {
- node_id: channel.get_counterparty_node_id(),
- features: peer_state.latest_features.clone(),
- unspendable_punishment_reserve: to_remote_reserve_satoshis,
- forwarding_info: channel.counterparty_forwarding_info(),
- // Ensures that we have actually received the `htlc_minimum_msat` value
- // from the counterparty through the `OpenChannel` or `AcceptChannel`
- // message (as they are always the first message from the counterparty).
- // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
- // default `0` value set by `Channel::new_outbound`.
- outbound_htlc_minimum_msat: if channel.have_received_message() {
- Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
- outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
- },
- funding_txo: channel.get_funding_txo(),
- // Note that accept_channel (or open_channel) is always the first message, so
- // `have_received_message` indicates that type negotiation has completed.
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
- short_channel_id: channel.get_short_channel_id(),
- outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
- inbound_scid_alias: channel.latest_inbound_scid_alias(),
- channel_value_satoshis: channel.get_value_satoshis(),
- unspendable_punishment_reserve: to_self_reserve_satoshis,
- balance_msat: balance.balance_msat,
- inbound_capacity_msat: balance.inbound_capacity_msat,
- outbound_capacity_msat: balance.outbound_capacity_msat,
- next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
- user_channel_id: channel.get_user_id(),
- confirmations_required: channel.minimum_depth(),
- confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
- force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
- is_outbound: channel.is_outbound(),
- is_channel_ready: channel.is_usable(),
- is_usable: channel.is_live(),
- is_public: channel.should_announce(),
- inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
- inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
- config: Some(channel.config()),
- });
+ for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
+ let details = ChannelDetails::from_channel(channel, best_block_height,
+ peer_state.latest_features.clone());
+ res.push(details);
}
}
}
res
}
- /// Gets the list of open channels, in random order. See ChannelDetail field documentation for
+ /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
/// more information.
pub fn list_channels(&self) -> Vec<ChannelDetails> {
self.list_channels_with_filter(|_| true)
}
- /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`]
- /// to ensure non-announced channels are used.
+ /// Gets the list of usable channels, in random order. Useful as an argument to
+ /// [`Router::find_route`] to ensure non-announced channels are used.
///
/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
/// are.
- ///
- /// [`find_route`]: crate::routing::router::find_route
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use is_live here instead of usable which leads to somewhat confused
// internal/external nomenclature, but that's ok cause that's probably what the user
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
+ /// Gets the list of channels we have with a given counterparty, in random order.
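+ ///
+ /// A usage sketch (assuming an initialized `channel_manager` and the peer's
+ /// `counterparty_node_id`):
+ ///
+ /// ```ignore
+ /// for chan in channel_manager.list_channels_with_counterparty(&counterparty_node_id) {
+ ///     println!("channel {:?} usable: {}", chan.channel_id, chan.is_usable);
+ /// }
+ /// ```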
+ pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
+ let best_block_height = self.best_block.read().unwrap().height();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let features = &peer_state.latest_features;
+ return peer_state.channel_by_id
+ .iter()
+ .map(|(_, channel)|
+ ChannelDetails::from_channel(channel, best_block_height, features.clone()))
+ .collect();
+ }
+ vec![]
+ }
+
/// Returns, in an undefined order, recent payments that -- if not fulfilled -- have yet to find a
/// successful path, or have unresolved HTLCs.
///
/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
/// result of a crash. If such a payment exists, is not listed here, and an
- /// [`Event::PaymentSent`] has not been received, you may consider retrying the payment.
+ /// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
///
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+ let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
failed_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update {
- let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
- let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, chan_entry);
- break result;
- }
- }
-
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `closing_signed`, which we'll delay if we're pending a monitor update.
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
- msg: shutdown_msg
+ msg: shutdown_msg,
});
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt.take() {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ }
+
if chan_entry.get().is_shutdown() {
let channel = remove_channel!(self, chan_entry);
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
/// would appear on a force-closure transaction, whichever is lower. We will allow our
/// counterparty to pay as much fee as they'd like, however.
///
- /// May generate a SendShutdown message event on success, which should be relayed.
+ /// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown
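+ ///
+ /// A minimal call sketch (assuming `channel_manager`, a known `channel_id`, and the
+ /// peer's `counterparty_node_id`):
+ ///
+ /// ```ignore
+ /// channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
+ /// // A MessageSendEvent::SendShutdown should then be generated for relay to the peer.
+ /// ```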
pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, None)
}
/// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
/// will appear on a force-closure transaction, whichever is lower).
///
- /// May generate a SendShutdown message event on success, which should be relayed.
+ /// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown
pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
}
pending_forward_info
}
- /// Gets the current channel_update for the given channel. This first checks if the channel is
+ /// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
/// public, and thus should be called whenever the result is going to be passed out in a
/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
///
- /// Note that in `internal_closing_signed`, this function is called without the `peer_state`
+ /// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
/// corresponding to the channel's counterparty locked, as the channel has been removed from
/// storage and the `peer_state` lock has been dropped.
+ ///
+ /// [`channel_update`]: msgs::ChannelUpdate
+ /// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.should_announce() {
return Err(LightningError {
self.get_channel_update_for_unicast(chan)
}
- /// Gets the current channel_update for the given channel. This does not check if the channel
- /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+ /// Gets the current [`channel_update`] for the given channel. This does not check if the channel
+ /// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
/// and thus MUST NOT be called unless the recipient of the resulting message has already
/// provided evidence that they know about the existence of the channel.
///
- /// Note that through `internal_closing_signed`, this function is called without the
+ /// Note that through [`internal_closing_signed`], this function is called without the
/// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
/// removed from storage and the `peer_state` lock has been dropped.
+ ///
+ /// [`channel_update`]: msgs::ChannelUpdate
+ /// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
})
}
- // Only public for testing, this should otherwise never be called direcly
- pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ #[cfg(test)]
+ pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ let _lck = self.total_consistency_lock.read().unwrap();
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+ }
+
+ fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ // The top-level caller should hold the total_consistency_lock read lock.
+ debug_assert!(self.total_consistency_lock.try_write().is_err());
+
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
- .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected"})?;
+ .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::InvalidRoute{err: "Route size too large considering onion data"});
+ return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
- .ok_or_else(|| APIError::InvalidRoute{err: "No peer matching the path's first hop found!" })?;
+ .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
- match {
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
- }
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
- htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- payment_id,
- payment_secret: payment_secret.clone(),
- payment_params: payment_params.clone(),
- }, onion_packet, &self.logger),
- chan)
- } {
- Some((update_add, commitment_signed, monitor_update)) => {
- let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
- let chan_id = chan.get().channel_id();
- match (update_err,
- handle_monitor_update_res!(self, update_err, chan,
- RAACommitmentOrder::CommitmentFirst, false, true))
- {
- (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
- (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
- (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
- // Note that MonitorUpdateInProgress here indicates (per function
- // docs) that we will resend the commitment update once monitor
- // updating completes. Therefore, we must return an error
- // indicating that it is unsafe to retry the payment wholesale,
- // which we do in the send_payment check for
- // MonitorUpdateInProgress, below.
- return Err(APIError::MonitorUpdateInProgress);
- },
- _ => unreachable!(),
+ if !chan.get().is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
+ }
+ let funding_txo = chan.get().get_funding_txo().unwrap();
+ let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+ htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ payment_secret: payment_secret.clone(),
+ }, onion_packet, &self.logger);
+ match break_chan_entry!(self, send_res, chan) {
+ Some(monitor_update) => {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
+ if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
+ break Err(e);
+ }
+ if update_res == ChannelMonitorUpdateStatus::InProgress {
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
+ return Err(APIError::MonitorUpdateInProgress);
}
-
- log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: path.first().unwrap().pubkey,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: vec![update_add],
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- },
- });
},
None => { },
}
/// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`]
/// fields for more info.
///
- /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
+ /// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via
/// [`PeerManager::process_events`]).
///
/// # Avoiding Duplicate Payments
/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
/// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
/// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
- /// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
- /// [`PaymentId`].
+ /// [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
+ /// second payment with the same [`PaymentId`].
///
/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
/// tracking of payments, including state to indicate once a payment has completed. Because you
///
/// # Possible Error States on [`PaymentSendFailure`]
///
- /// Each path may have a different return value, and PaymentSendValue may return a Vec with
+ /// Each path may have a different return value, and [`PaymentSendFailure`] may contain a `Vec` with
/// each entry matching the corresponding-index entry in the route paths, see
/// [`PaymentSendFailure`] for more info.
///
/// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
- /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
+ /// Note that depending on the type of the [`PaymentSendFailure`] the HTLC may have been
/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
/// different route unless you intend to pay twice!
///
/// [`Route`], we assume the invoice had the basic_mpp feature set.
///
/// [`Event::PaymentSent`]: events::Event::PaymentSent
+ /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
+ /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
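+ ///
+ /// A minimal sketch (assuming `channel_manager`, a `route` found via the configured
+ /// [`Router`], and the invoice's `payment_hash` and `payment_secret`):
+ ///
+ /// ```ignore
+ /// // Using the payment hash as the PaymentId gives easy idempotency per the notes above.
+ /// let payment_id = PaymentId(payment_hash.0);
+ /// channel_manager.send_payment(&route, payment_hash, &payment_secret, payment_id)?;
+ /// ```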
pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
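+ ///
+ /// A minimal sketch (assuming `channel_manager`, `route_params` built from an invoice,
+ /// and the payment's `payment_hash`/`payment_secret`):
+ ///
+ /// ```ignore
+ /// channel_manager.send_payment_with_retry(payment_hash, &payment_secret,
+ ///     PaymentId(payment_hash.0), route_params, Retry::Attempts(3))?;
+ /// ```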
- pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> {
+ pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
.send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params,
- &self.router, self.list_usable_channels(), self.compute_inflight_htlcs(),
+ &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ &self.pending_events,
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
}
- /// Retries a payment along the given [`Route`].
- ///
- /// Errors returned are a superset of those returned from [`send_payment`], so see
- /// [`send_payment`] documentation for more details on errors. This method will also error if the
- /// retry amount puts the payment more than 10% over the payment's total amount, if the payment
- /// for the given `payment_id` cannot be found (likely due to timeout or success), or if
- /// further retries have been disabled with [`abandon_payment`].
- ///
- /// [`send_payment`]: [`ChannelManager::send_payment`]
- /// [`abandon_payment`]: [`ChannelManager::abandon_payment`]
- pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- }
-
- /// Signals that no further retries for the given payment will occur.
+ /// Signals that no further retries for the given payment should occur. Useful if you have a
+ /// pending outbound payment with retries remaining, but wish to stop retrying the payment before
+ /// retries are exhausted.
///
- /// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
- /// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
- /// generated as soon as there are no remaining pending HTLCs for this payment.
+ /// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
+ /// as there are no remaining pending HTLCs for this payment.
///
/// Note that calling this method does *not* prevent a payment from succeeding. You must still
/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
/// determine the ultimate status of a payment.
///
/// If an [`Event::PaymentFailed`] event is generated and we restart without this
- /// [`ChannelManager`] having been persisted, the payment may still be in the pending state
- /// upon restart. This allows further calls to [`retry_payment`] (and requiring a second call
- /// to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
- /// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+ /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
///
- /// [`abandon_payment`]: Self::abandon_payment
- /// [`retry_payment`]: Self::retry_payment
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`Event::PaymentSent`]: events::Event::PaymentSent
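+ ///
+ /// An illustrative sketch; `channel_manager` and the `payment_id` used when sending are
+ /// assumed to be in scope:
+ ///
+ /// ```ignore
+ /// // Stop retrying; an Event::PaymentFailed will be generated once no HTLCs remain pending.
+ /// channel_manager.abandon_payment(payment_id);
+ /// ```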
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- if let Some(payment_failed_ev) = self.pending_outbound_payments.abandon_payment(payment_id) {
- self.pending_events.lock().unwrap().push(payment_failed_ev);
- }
+ self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
}
/// Send a spontaneous payment, which is a payment that does not require the recipient to have
/// [`send_payment`]: Self::send_payment
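+ ///
+ /// An illustrative sketch; `channel_manager` and a `route` from the router are assumed to
+ /// be in scope:
+ ///
+ /// ```ignore
+ /// // Passing None lets the ChannelManager generate a preimage; the returned payment_hash
+ /// // commits to it.
+ /// let payment_hash = channel_manager.send_spontaneous_payment(&route, None,
+ ///     PaymentId([7; 32]))?;
+ /// ```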
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
/// payments.
///
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
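+ ///
+ /// An illustrative sketch; `channel_manager` and `route_params` (built with
+ /// [`PaymentParameters::for_keysend`]) are assumed to be in scope:
+ ///
+ /// ```ignore
+ /// let payment_hash = channel_manager.send_spontaneous_payment_with_retry(None,
+ ///     PaymentId([0; 32]), route_params, Retry::Attempts(3))?;
+ /// ```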
- pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, PaymentSendFailure> {
+ pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
retry_strategy, route_params, &self.router, self.list_usable_channels(),
- self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
- &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+ &self.logger, &self.pending_events,
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Send a payment that is probing the given route for liquidity. We calculate the
/// us to easily discern them from real payments.
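+ ///
+ /// An illustrative sketch; `channel_manager` and a candidate `path: Vec<RouteHop>` from
+ /// the router are assumed to be in scope:
+ ///
+ /// ```ignore
+ /// let (probe_hash, probe_id) = channel_manager.send_probe(path)?;
+ /// // Later, watch for Event::ProbeSuccessful or Event::ProbeFailed matching probe_id.
+ /// ```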
pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
(chan, funding_msg)
},
Err(_) => { return Err(APIError::ChannelUnavailable {
- err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+ err: "Signer refused to sign the initial commitment transaction".to_owned()
}) },
}
};
let best_block_height = self.best_block.read().unwrap().height();
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+ &self.pending_events, &self.logger,
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
///
/// This currently includes:
/// * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
- /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+ /// * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
/// than a minute, informing the network that they should no longer attempt to route over
/// the channel.
- /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
- /// with the current `ChannelConfig`.
+ /// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
+ /// with the current [`ChannelConfig`].
/// * Removing peers which have disconnected and no longer have any channels.
///
- /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+ /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
+ ///
+ /// [`ChannelUpdate`]: msgs::ChannelUpdate
+ /// [`ChannelConfig`]: crate::util::config::ChannelConfig
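+ ///
+ /// An illustrative sketch; `channel_manager` is assumed to be in scope, and the tick
+ /// interval is up to the caller (once per minute is typical):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(60));
+ ///     channel_manager.timer_tick_occurred();
+ /// }
+ /// ```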
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = NotifyOption::SkipPersist;
/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
/// startup during which time claims that were in-progress at shutdown may be replayed.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
- self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+ self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
}
/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
/// reason for the failure.
///
/// See [`FailureCode`] for valid failure codes.
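+ ///
+ /// An illustrative sketch; `channel_manager` and a claimable `payment_hash` are assumed
+ /// to be in scope:
+ ///
+ /// ```ignore
+ /// channel_manager.fail_htlc_backwards_with_reason(&payment_hash,
+ ///     FailureCode::IncorrectOrUnknownPaymentDetails);
+ /// ```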
- pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
+ pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
}
/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
- fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
match failure_code {
- FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
- FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+ FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
+ FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
- HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+ HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
}
}
}
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
- self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx, &self.pending_events, &self.logger);
+ HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
+ if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+ session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
+ &self.pending_events, &self.logger)
+ { self.push_pending_forwards_ev(); }
},
HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
- let mut forward_event = None;
+ let mut push_forward_ev = false;
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
if forward_htlcs.is_empty() {
- forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
+ push_forward_ev = true;
}
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
}
}
mem::drop(forward_htlcs);
+ if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
- if let Some(time) = forward_event {
- pending_events.push(events::Event::PendingHTLCsForwardable {
- time_forwardable: time
- });
- }
pending_events.push(events::Event::HTLCHandlingFailed {
prev_channel_id: outpoint.to_channel_id(),
failed_next_destination: destination,
let per_peer_state = self.per_peer_state.read().unwrap();
let chan_id = prev_hop.outpoint.to_channel_id();
-
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
};
- let mut peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ let peer_state_opt = counterparty_node_id_opt.as_ref().map(
|counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
|peer_mutex| peer_mutex.lock().unwrap()
)
).unwrap_or(None);
- if let Some(hash_map::Entry::Occupied(mut chan)) = peer_state_opt.as_mut().map(|peer_state| peer_state.channel_by_id.entry(chan_id))
- {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
- Ok(msgs_monitor_option) => {
- if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
- "Failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state);
- self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
- return Err((counterparty_node_id, err));
- }
- }
- if let Some((msg, commitment_signed)) = msgs {
- log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
- log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
- peer_state_opt.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- }
- });
- }
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state);
- self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
- Ok(())
- } else {
- Ok(())
- }
- },
- Err((e, monitor_update)) => {
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same update and try
- // again on restart.
- log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
- "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
- payment_preimage, e);
- },
+ if let Some(mut peer_state_lock) = peer_state_opt {
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_bytes!(chan_id), action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
- let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
- if drop {
- chan.remove_entry();
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+ let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
}
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state);
- self.handle_monitor_update_completion_actions(completion_action(None));
- Err((counterparty_node_id, res))
- },
- }
- } else {
- let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage,
- }],
- };
- // We update the ChannelMonitor on the backward link, after
- // receiving an `update_fulfill_htlc` from the forward link.
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
- if update_res != ChannelMonitorUpdateStatus::Completed {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same event and try
- // again on restart.
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, update_res);
+ }
+ return Ok(());
}
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state);
- // Note that we do process the completion action here. This totally could be a
- // duplicate claim, but we have no way of knowing without interrogating the
- // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
- // generally always allowed to be duplicative (and it's specifically noted in
- // `PaymentForwarded`).
- self.handle_monitor_update_completion_actions(completion_action(None));
- Ok(())
}
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage,
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an `update_fulfill_htlc` from the forward link.
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same event and try
+ // again on restart.
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, update_res);
+ }
+ // Note that we do process the completion action here. This totally could be a
+ // duplicate claim, but we have no way of knowing without interrogating the
+ // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+ // generally always allowed to be duplicative (and it's specifically noted in
+ // `PaymentForwarded`).
+ self.handle_monitor_update_completion_actions(completion_action(None));
+ Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+ log_bytes!(channel.channel_id()),
+ if raa.is_some() { "an" } else { "no" },
+ if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+ if funding_broadcastable.is_some() { "" } else { "not " },
+ if channel_ready.is_some() { "sending" } else { "without" },
+ if announcement_sigs.is_some() { "sending" } else { "without" });
+
let mut htlc_forwards = None;
let counterparty_node_id = channel.get_counterparty_node_id();
}
fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
- let htlc_forwards;
- let (mut pending_failures, finalized_claims, counterparty_node_id) = {
- let counterparty_node_id = match counterparty_node_id {
- Some(cp_id) => cp_id.clone(),
- None => {
- // TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- match id_to_peer.get(&funding_txo.to_channel_id()) {
- Some(cp_id) => cp_id.clone(),
- None => return,
- }
- }
- };
- let per_peer_state = self.per_peer_state.read().unwrap();
- let mut peer_state_lock;
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() { return }
- peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let mut channel = {
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
- hash_map::Entry::Occupied(chan) => chan,
- hash_map::Entry::Vacant(_) => return,
+ let counterparty_node_id = match counterparty_node_id {
+ Some(cp_id) => cp_id.clone(),
+ None => {
+ // TODO: Once we can rely on the counterparty_node_id from the
+ // monitor event, this and the id_to_peer map should be removed.
+ let id_to_peer = self.id_to_peer.lock().unwrap();
+ match id_to_peer.get(&funding_txo.to_channel_id()) {
+ Some(cp_id) => cp_id.clone(),
+ None => return,
}
- };
- if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
- return;
}
-
- let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height());
- let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
- // We only send a channel_update in the case where we are just now sending a
- // channel_ready and the channel is in a usable state. We may re-send a
- // channel_update later through the announcement_signatures process for public
- // channels, but there's no reason not to just inform our counterparty of our fees
- // now.
- if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
- Some(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get().get_counterparty_node_id(),
- msg,
- })
- } else { None }
- } else { None };
- htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
- if let Some(upd) = channel_update {
- peer_state.pending_msg_events.push(upd);
+ };
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let mut peer_state_lock;
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return }
+ peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let mut channel = {
+ match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ hash_map::Entry::Occupied(chan) => chan,
+ hash_map::Entry::Vacant(_) => return,
}
-
- (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
- if let Some(forwards) = htlc_forwards {
- self.forward_htlcs(&mut [forwards][..]);
- }
- self.finalize_claims(finalized_claims);
- for failure in pending_failures.drain(..) {
- let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
- self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+ log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
+ highest_applied_update_id, channel.get().get_latest_monitor_update_id());
+ if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+ return;
}
+ handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
}
/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
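+ ///
+ /// An illustrative sketch of accepting from an event handler; `channel_manager` is assumed
+ /// to be in scope and error handling is elided:
+ ///
+ /// ```ignore
+ /// if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
+ ///     channel_manager.accept_inbound_channel(&temporary_channel_id,
+ ///         &counterparty_node_id, 0u128).unwrap();
+ /// }
+ /// ```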
fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
hash_map::Entry::Occupied(mut channel) => {
if !channel.get().inbound_is_awaiting_accept() {
peer_state.pending_msg_events.push(send_msg_err_event);
let _ = remove_channel!(self, channel);
return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so we can accept it without further checks. Only when this would
+ // be the peer's sole channel do we enforce the maximum number of peers with unfunded channels.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.get().get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let _ = remove_channel!(self, channel);
+ return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+ }
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
Ok(())
}
+ /// Gets the number of peers which match the given filter and do not have any funded, outbound,
+ /// or 0-conf channels.
+ ///
+ /// The filter is called once per peer and is given a reference to that peer's state; it
+ /// should return whether the peer ought to be counted.
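+ ///
+ /// For example, `self.peers_without_funded_channels(|peer| peer.is_connected)` counts only
+ /// currently-connected peers, while `|peer| !peer.channel_by_id.is_empty()` counts only
+ /// peers with at least one (unfunded) channel open with us.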
+ fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
+ where Filter: Fn(&PeerState<<SP::Target as SignerProvider>::Signer>) -> bool {
+ let mut peers_without_funded_channels = 0;
+ let best_block_height = self.best_block.read().unwrap().height();
+ {
+ let peer_state_lock = self.per_peer_state.read().unwrap();
+ for (_, peer_mtx) in peer_state_lock.iter() {
+ let peer = peer_mtx.lock().unwrap();
+ if !maybe_count_peer(&*peer) { continue; }
+ let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
+ if num_unfunded_channels == peer.channel_by_id.len() {
+ peers_without_funded_channels += 1;
+ }
+ }
+ }
+ peers_without_funded_channels
+ }
+
+ fn unfunded_channel_count(
+ peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
+ ) -> usize {
+ let mut num_unfunded_channels = 0;
+ for (_, chan) in peer.channel_by_id.iter() {
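+ // Only inbound, non-0-conf channels whose funding transaction has no confirmations
+ // yet count towards the unfunded-channel limits.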
+ if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
+ chan.get_funding_tx_confirmations(best_block_height) == 0
+ {
+ num_unfunded_channels += 1;
+ }
+ }
+ num_unfunded_channels
+ }
+
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
if msg.chain_hash != self.genesis_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
let mut random_bytes = [0u8; 16];
random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
let user_channel_id = u128::from_be_bytes(random_bytes);
-
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+
+ // Get the number of peers with channels, but without funded ones. We don't care too much
+ // about peers that never open a channel, so we filter by peers that have at least one
+ // channel, and then limit the number of those with unfunded channels.
+ let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
+
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so we only enforce the maximum-peers-with-unfunded-channels limit
+ // when this is the peer's first channel (the per-peer unfunded-channel cap is checked below).
+ if peer_state.channel_by_id.is_empty() &&
+ channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
+ !self.default_configuration.manually_accept_inbound_channels
+ {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
+ msg.temporary_channel_id.clone()));
+ }
+
+ let best_block_height = self.best_block.read().unwrap().height();
+ if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
+ msg.temporary_channel_id.clone()));
+ }
+
let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
- counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration,
- self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
+ counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
+ &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
{
Err(e) => {
self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
}
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+ let best_block = *self.best_block.read().unwrap();
+
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
})?;
- let ((funding_msg, monitor, mut channel_ready), mut chan) = {
- let best_block = *self.best_block.read().unwrap();
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
+
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let ((funding_msg, monitor), chan) =
match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
hash_map::Entry::Occupied(mut chan) => {
(try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
- }
- };
- // Because we have exclusive ownership of the channel here we can release the peer_state
- // lock before watch_channel
- match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
- ChannelMonitorUpdateStatus::Completed => {},
- ChannelMonitorUpdateStatus::PermanentFailure => {
- // Note that we reply with the new channel_id in error messages if we gave up on the
- // channel, not the temporary_channel_id. This is compatible with ourselves, but the
- // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
- // any messages referencing a previously-closed channel anyway.
- // We do not propagate the monitor update to the user as it would be for a monitor
- // that we didn't manage to store (and that we don't care about - we don't respond
- // with the funding_signed so the channel can never go on chain).
- let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
- assert!(failed_htlcs.is_empty());
- return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
- },
- ChannelMonitorUpdateStatus::InProgress => {
- // There's no problem signing a counterparty's funding transaction if our monitor
- // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
- // accepted payment from yet. We do, however, need to wait to send our channel_ready
- // until we have persisted our monitor.
- chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
- channel_ready = None; // Don't send the channel_ready now
- },
- }
- // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
- // peer exists, despite the inner PeerState potentially having no channels after removing
- // the channel above.
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
+ };
+
match peer_state.channel_by_id.entry(funding_msg.channel_id) {
hash_map::Entry::Occupied(_) => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+ Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
},
hash_map::Entry::Vacant(e) => {
- let mut id_to_peer = self.id_to_peer.lock().unwrap();
- match id_to_peer.entry(chan.channel_id()) {
+ match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
hash_map::Entry::Occupied(_) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
i_e.insert(chan.get_counterparty_node_id());
}
}
+
+ // There's no problem signing a counterparty's funding transaction if our monitor
+ // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+ // accepted payment from yet. We do, however, need to wait to send our channel_ready
+ // until we have persisted our monitor.
+ let new_channel_id = funding_msg.channel_id;
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id: counterparty_node_id.clone(),
msg: funding_msg,
});
- if let Some(msg) = channel_ready {
- send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
+
+ let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+
+ let chan = e.insert(chan);
+ let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+ per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ // We do not propagate the monitor update to the user as it would be for a monitor
+ // that we didn't manage to store (and that we don't care about - we don't respond
+ // with the funding_signed so the channel can never go on chain).
+ if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+ res.0 = None;
}
- e.insert(chan);
+ res
}
}
- Ok(())
}
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
- let funding_tx = {
- let best_block = *self.best_block.read().unwrap();
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
- .ok_or_else(|| {
- debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
- })?;
+ let best_block = *self.best_block.read().unwrap();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) {
- Ok(update) => update,
- Err(e) => try_chan_entry!(self, Err(e), chan),
- };
- match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
- }
- }
- return res
- },
- }
- if let Some(msg) = channel_ready {
- send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ let monitor = try_chan_entry!(self,
+ chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
+ let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+ let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
}
- funding_tx
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
- }
- };
- log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
- self.tx_broadcaster.broadcast_transaction(&funding_tx);
- Ok(())
+ }
+ res
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ }
}
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
- let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+ chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
dropped_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update {
- let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
- let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, chan_entry);
- break result;
- }
- }
-
if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `closing_signed`, which we'll delay if we're pending a monitor update.
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
msg,
});
}
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ }
break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
- let _ = handle_error!(self, result, *counterparty_node_id);
- Ok(())
+ result
}
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let (revoke_and_ack, commitment_signed, monitor_update) =
- match chan.get_mut().commitment_signed(&msg, &self.logger) {
- Err((None, e)) => try_chan_entry!(self, Err(e), chan),
- Err((Some(update), e)) => {
- assert!(chan.get().is_awaiting_monitor_update());
- let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update);
- try_chan_entry!(self, Err(e), chan);
- unreachable!();
- },
- Ok(res) => res
- };
- let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
- if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
- return Err(e);
- }
-
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id.clone(),
- msg: revoke_and_ack,
- });
- if let Some(msg) = commitment_signed {
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: msg,
- },
- });
- }
- Ok(())
+ let funding_txo = chan.get().get_funding_txo();
+ let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
#[inline]
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
- let mut forward_event = None;
+ let mut push_forward_event = false;
let mut new_intercept_events = Vec::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
// We don't want to generate a PendingHTLCsForwardable event if only intercepted
// payments are being processed.
if forward_htlcs_empty {
- forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
+ push_forward_event = true;
}
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
+ if push_forward_event { self.push_pending_forwards_ev() }
+ }
+ }
- match forward_event {
- Some(time) => {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PendingHTLCsForwardable {
- time_forwardable: time
- });
- }
- None => {},
- }
+ // We only want to push a PendingHTLCsForwardable event if no others are queued.
+ fn push_pending_forwards_ev(&self) {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ let forward_ev_exists = pending_events.iter()
+ .any(|ev| matches!(ev, events::Event::PendingHTLCsForwardable { .. }));
+ if !forward_ev_exists {
+ pending_events.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable:
+ Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
+ });
}
}
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
- let mut htlcs_to_fail = Vec::new();
- let res = loop {
+ let (htlcs_to_fail, res) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
- })?;
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ }).map(|mtx| mtx.lock().unwrap())?;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
- let raa_updates = break_chan_entry!(self,
- chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
- htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
- let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update);
- if was_paused_for_mon_update {
- assert!(update_res != ChannelMonitorUpdateStatus::Completed);
- assert!(raa_updates.commitment_update.is_none());
- assert!(raa_updates.accepted_htlcs.is_empty());
- assert!(raa_updates.failed_htlcs.is_empty());
- assert!(raa_updates.finalized_claimed_htlcs.is_empty());
- break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
- }
- if update_res != ChannelMonitorUpdateStatus::Completed {
- if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
- RAACommitmentOrder::CommitmentFirst, false,
- raa_updates.commitment_update.is_some(), false,
- raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs) {
- break Err(e);
- } else { unreachable!(); }
- }
- if let Some(updates) = raa_updates.commitment_update {
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates,
- });
- }
- break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs,
- chan.get().get_short_channel_id()
- .unwrap_or(chan.get().outbound_scid_alias()),
- chan.get().get_funding_txo().unwrap(),
- chan.get().get_user_id()))
+ let funding_txo = chan.get().get_funding_txo();
+ let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ let res = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan);
+ (htlcs_to_fail, res)
},
- hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
- match res {
- Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
- short_channel_id, channel_outpoint, user_channel_id)) =>
- {
- for failure in pending_failures.drain(..) {
- let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
- self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
- }
- self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
- self.finalize_claims(finalized_claim_htlcs);
- Ok(())
- },
- Err(e) => Err(e)
- }
+ res
}
fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
Ok(())
}
- /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
+ debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
+
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
/// update events as a separate process method here.
#[cfg(fuzzing)]
pub fn process_monitor_events(&self) {
- self.process_pending_monitor_events();
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ if self.process_pending_monitor_events() {
+ NotifyOption::DoPersist
+ } else {
+ NotifyOption::SkipPersist
+ }
+ });
}
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
let mut handle_errors = Vec::new();
- {
- let per_peer_state = self.per_peer_state.read().unwrap();
+ // Walk our list of channels and find any that need to update. Note that when we do find an
+ // update, if it includes actions that must be taken afterwards, we have to drop the
+ // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
+ // manage to go through all our peers without finding a single channel to update.
+ 'peer_loop: loop {
+ let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|channel_id, chan| {
- match chan.maybe_free_holding_cell_htlcs(&self.logger) {
- Ok((commitment_opt, holding_cell_failed_htlcs)) => {
- if !holding_cell_failed_htlcs.is_empty() {
- failed_htlcs.push((
- holding_cell_failed_htlcs,
- *channel_id,
- chan.get_counterparty_node_id()
- ));
- }
- if let Some((commitment_update, monitor_update)) = commitment_opt {
- match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: commitment_update,
- });
- },
- e => {
- has_monitor_update = true;
- let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
- handle_errors.push((chan.get_counterparty_node_id(), res));
- if close_channel { return false; }
- },
- }
+ 'chan_loop: loop {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+ for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let funding_txo = chan.get_funding_txo();
+ let (monitor_opt, holding_cell_failed_htlcs) =
+ chan.maybe_free_holding_cell_htlcs(&self.logger);
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+ }
+ if let Some(monitor_update) = monitor_opt {
+ has_monitor_update = true;
+
+ let update_res = self.chain_monitor.update_channel(
+ funding_txo.expect("channel is live"), monitor_update);
+ let update_id = monitor_update.update_id;
+ let channel_id: [u8; 32] = *channel_id;
+ let res = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+ peer_state.channel_by_id.remove(&channel_id));
+ if res.is_err() {
+ handle_errors.push((counterparty_node_id, res));
}
- true
- },
- Err(e) => {
- let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
- handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
- // ChannelClosed event is generated by handle_error for us
- !close_channel
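+ // Handling the update may have dropped our locks (and may even have removed
+ // the channel from the map), so restart the walk from the top.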
+ continue 'peer_loop;
}
}
- });
+ break 'chan_loop;
+ }
}
+ break 'peer_loop;
}
let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
}
- fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
+ fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
- log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
- log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+ log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
+ log_pubkey!(counterparty_node_id));
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
peer_state.is_connected = false;
peer_state.ok_to_remove(true)
- } else { true }
+ } else { debug_assert!(false, "Unconnected peer disconnected"); true }
};
if remove_peer {
per_peer_state.remove(counterparty_node_id);
}
}
- fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+ fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
if !init_msg.features.supports_static_remote_key() {
- log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+ log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
}
- log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
-
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ // If we have too many peers connected which don't have funded channels, disconnect the
+ // new peer immediately (as long as it doesn't itself have funded channels). If we have a
+ // bunch of unfunded channels taking up space in memory for disconnected peers, we still
+ // let new peers connect, but we'll reject new channels from them.
+ let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+ let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+
{
let mut peer_state_lock = self.per_peer_state.write().unwrap();
match peer_state_lock.entry(counterparty_node_id.clone()) {
hash_map::Entry::Vacant(e) => {
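+ // A brand-new peer with no channels at all; if we're limiting inbound peers, reject it.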
+ if inbound_peer_limited {
+ return Err(());
+ }
e.insert(Mutex::new(PeerState {
channel_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
is_connected: true,
}));
},
hash_map::Entry::Occupied(e) => {
let mut peer_state = e.get().lock().unwrap();
peer_state.latest_features = init_msg.features.clone();
+
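+ // A reconnecting peer whose channels are all still unfunded isn't "protected" either;
+ // reject the connection while we're limiting inbound peers.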
+ let best_block_height = self.best_block.read().unwrap().height();
+ if inbound_peer_limited &&
+ Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+ peer_state.channel_by_id.len()
+ {
+ return Err(());
+ }
+
debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
peer_state.is_connected = true;
},
}
}
- let per_peer_state = self.per_peer_state.read().unwrap();
+ log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+ let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
/// [`ChannelManager`].
pub fn provided_init_features(_config: &UserConfig) -> InitFeatures {
// Note that if new features are added here which other peers may (eventually) require, we
- // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for
- // ErroringMessageHandler.
+ // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
+ // [`ErroringMessageHandler`].
let mut features = InitFeatures::empty();
features.set_data_loss_protect_optional();
features.set_upfront_shutdown_script_optional();
impl Readable for ClaimableHTLC {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let mut prev_hop = crate::util::ser::OptionDeserWrapper(None);
+ let mut prev_hop = crate::util::ser::RequiredWrapper(None);
let mut value = 0;
let mut payment_data: Option<msgs::FinalOnionHopData> = None;
let mut cltv_expiry = 0;
let id: u8 = Readable::read(reader)?;
match id {
0 => {
- let mut session_priv: crate::util::ser::OptionDeserWrapper<SecretKey> = crate::util::ser::OptionDeserWrapper(None);
+ let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
- let mut path = Some(Vec::new());
+ let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
let mut payment_id = None;
let mut payment_secret = None;
- let mut payment_params = None;
+ let mut payment_params: Option<PaymentParameters> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
(4, path, vec_type),
- (5, payment_params, option),
+ (5, payment_params, (option: ReadableArgs, 0)),
});
if payment_id.is_none() {
// For backwards compat, if there was no payment_id written, use the session_priv bytes
// instead.
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
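+ // An outbound HTLC must carry a non-empty path; anything else is corrupt data.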
+ if path.is_none() || path.as_ref().unwrap().is_empty() {
+ return Err(DecodeError::InvalidValue);
+ }
+ let path = path.unwrap();
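+ // A `payment_params` read from older data may lack a final CLTV expiry delta (it
+ // reads as 0 above), so backfill it from the path's final hop.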
+ if let Some(params) = payment_params.as_mut() {
+ if params.final_cltv_expiry_delta == 0 {
+ params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
+ }
+ }
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
first_hop_htlc_msat,
- path: path.unwrap(),
+ path,
payment_id: payment_id.unwrap(),
payment_secret,
- payment_params,
})
}
1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
- HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => {
+ HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret } => {
0u8.write(writer)?;
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
(4, *path, vec_type),
- (5, payment_params, option),
+ (5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
});
}
HTLCSource::PreviousHopData(ref field) => {
htlc_purposes.push(purpose);
}
+ let mut monitor_update_blocked_actions_per_peer = None;
+ let mut peer_states = Vec::new();
+ for (_, peer_state_mutex) in per_peer_state.iter() {
+ // Because we're holding the owning `per_peer_state` write lock here there's no chance
+ // of a lockorder violation deadlock - no other thread can be holding any
+ // per_peer_state lock at all.
+ peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
+ }
+
(serializable_peer_count).write(writer)?;
- for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
- let peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &*peer_state_lock;
+ for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
// Peers which we have no channels to should be dropped once disconnected. As we
// disconnect all peers when shutting down and serializing the ChannelManager, we
// consider all peers as disconnected here. There's therefore no need to write peers with
if !peer_state.ok_to_remove(false) {
peer_pubkey.write(writer)?;
peer_state.latest_features.write(writer)?;
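+ // Gather this peer's blocked actions here; they are all serialized together in a
+ // single TLV below.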
+ if !peer_state.monitor_update_blocked_actions.is_empty() {
+ monitor_update_blocked_actions_per_peer
+ .get_or_insert_with(Vec::new)
+ .push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
+ }
}
}
// LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
// map. Thus, if there are no entries we skip writing a TLV for it.
pending_claiming_payments = None;
- } else {
- debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
}
write_tlv_fields!(writer, {
(3, pending_outbound_payments, required),
(4, pending_claiming_payments, option),
(5, self.our_network_pubkey, required),
+ (6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
(9, htlc_purposes, vec_type),
(11, self.probing_cookie_secret, required),
channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
latest_features: Readable::read(reader)?,
pending_msg_events: Vec::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
is_connected: false,
};
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut pending_claiming_payments = Some(HashMap::new());
+ let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, option),
(4, pending_claiming_payments, option),
(5, received_network_pubkey, option),
+ (6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
(9, claimable_htlc_purposes, vec_type),
(11, probing_cookie_secret, option),
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
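+ // Queue the closure events read above now; the payment-replay logic below may append
+ // further events.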
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
pending_outbound_payments = Some(outbounds);
- } else {
+ }
+ let pending_outbounds = OutboundPayments {
+ pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+ retry_lock: Mutex::new(())
+ };
+
+ {
// If we're tracking pending payments, ensure we haven't lost any by looking at the
// ChannelMonitor data for any channels for which we do not have authoritative state
// (i.e. those for which we just force-closed above or we otherwise don't have a
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
- for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+ for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
if path.is_empty() {
log_error!(args.logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
+
let path_amt = path.last().unwrap().fee_msat;
let mut session_priv_bytes = [0; 32];
session_priv_bytes[..].copy_from_slice(&session_priv[..]);
- match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+ match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
hash_map::Entry::Occupied(mut entry) => {
let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
}
}
}
- for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
- if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
- let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
- info.prev_funding_outpoint == prev_hop_data.outpoint &&
- info.prev_htlc_id == prev_hop_data.htlc_id
- };
- // The ChannelMonitor is now responsible for this HTLC's
- // failure/success and will let us know what its outcome is. If we
- // still have an entry for this HTLC in `forward_htlcs` or
- // `pending_intercepted_htlcs`, we were apparently not persisted after
- // the monitor was when forwarding the payment.
- forward_htlcs.retain(|_, forwards| {
- forwards.retain(|forward| {
- if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- false
+ for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+ match htlc_source {
+ HTLCSource::PreviousHopData(prev_hop_data) => {
+ let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+ info.prev_funding_outpoint == prev_hop_data.outpoint &&
+ info.prev_htlc_id == prev_hop_data.htlc_id
+ };
+ // The ChannelMonitor is now responsible for this HTLC's
+ // failure/success and will let us know what its outcome is. If we
+ // still have an entry for this HTLC in `forward_htlcs` or
+ // `pending_intercepted_htlcs`, we were apparently not persisted after
+ // the monitor was persisted when we forwarded the payment.
+ forward_htlcs.retain(|_, forwards| {
+ forwards.retain(|forward| {
+ if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ false
+ } else { true }
} else { true }
+ });
+ !forwards.is_empty()
+ });
+ pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ pending_events_read.retain(|event| {
+ if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+ intercepted_id != ev_id
+ } else { true }
+ });
+ false
} else { true }
});
- !forwards.is_empty()
- });
- pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- pending_events_read.retain(|event| {
- if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
- intercepted_id != ev_id
- } else { true }
- });
- false
- } else { true }
- });
+ },
+ HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
+ if let Some(preimage) = preimage_opt {
+ let pending_events = Mutex::new(pending_events_read);
+ // Note that we set `from_onchain` to "false" here,
+ // deliberately keeping the pending payment around forever.
+ // Given this should only occur when we have a channel we're
+ // force-closing for being stale, that's okay.
+ // The alternative would be to wipe the state when claiming,
+ // generating a `PaymentPathSuccessful` event but regenerating
+ // it and the `PaymentSent` on every restart until the
+ // `ChannelMonitor` is removed.
+ pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+ pending_events_read = pending_events.into_inner().unwrap();
+ }
+ },
}
}
}
}
}
- if !forward_htlcs.is_empty() {
+ if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
// If we have pending HTLCs to forward, assume we either dropped a
// `PendingHTLCsForwardable` or the user received it but never processed it as they
// shut down before the timer hit. Either way, set the time_forwardable to a small
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
- if !channel_closures.is_empty() {
- pending_events_read.append(&mut channel_closures);
- }
-
let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue)
}
}
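+ // Hand each peer's deserialized blocked actions back to its peer state; actions for an
+ // unknown peer indicate corrupt data.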
+ for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
+ if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+ peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
+ } else {
+ log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: bounded_fee_estimator,
inbound_payment_key: expanded_inbound_key,
pending_inbound_payments: Mutex::new(pending_inbound_payments),
- pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()), },
+ pending_outbound_payments: pending_outbounds,
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
// to connect messages with new values
chan.0.contents.fee_base_msat *= 2;
chan.1.contents.fee_base_msat *= 2;
- let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
- let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+ let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
+ &nodes[1].node.get_our_node_id()).pop().unwrap();
+ let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
+ &nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
- nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
// To start (1), send a regular payment but don't claim it.
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
final_value_msat: 100_000,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
-
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
}
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
+ }
+ {
// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
// as it has the funding transaction.
let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
+ }
+ {
// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
}
+ #[test]
+ fn test_connection_limiting() {
+ // Test that we limit un-channel'd peers and un-funded channels properly.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Note that create_network connects the nodes together for us
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ let mut funding_tx = None;
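+ // Open MAX_UNFUNDED_CHANS_PER_PEER channels, funding only the first so that exactly one
+ // channel survives the disconnect below.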
+ for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+ if idx == 0 {
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+ funding_tx = Some(tx.clone());
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ check_added_monitors!(nodes[1], 1);
+ let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+ check_added_monitors!(nodes[0], 1);
+ }
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+
+ // A MAX_UNFUNDED_CHANS_PER_PEER + 1st channel will be summarily rejected
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // Further, because all of our channels with nodes[0] are inbound, and none of them are
+ // funded, nodes[0] doesn't count as a "protected" peer, i.e. it counts towards the
+ // MAX_NO_CHANNEL_PEERS limit.
+ let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
+ for _ in 1..super::MAX_NO_CHANNEL_PEERS {
+ let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ peer_pks.push(random_pk);
+ nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ }
+ let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+ // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
+ // them if we have too many un-channel'd peers.
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
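+ // All channels except the one we funded above are unfunded and dropped on disconnect,
+ // generating one ChannelClosed event each.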
+ assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
+ for ev in chan_closed_events {
+ if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
+ }
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+ // but of course if the connection is outbound it's allowed...
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
+ // Even though we accept one more connection from new peers, we won't actually let them
+ // open channels.
+ assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
+ for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+ nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // Outbound channels, of course, are always allowed
+ nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
+
+ // If we fund the first channel, nodes[0] has a live on-chain channel with us; it is now
+ // "protected" and can connect again.
+ mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ // Further, because the first channel was funded, we can open another channel with
+ // last_random_pk.
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+ }
+
+ #[test]
+ fn test_outbound_chans_unlimited() {
+ // Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Note that create_network connects the nodes together for us
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+
+ // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
+ // rejected.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // but we can still open an outbound channel.
+ nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+
+ // but even with such an outbound channel, additional inbound channels will still fail.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+ }
+
+ #[test]
+ fn test_0conf_limiting() {
+ // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
+ // flag set and (sometimes) accept channels as 0conf.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut settings = test_default_channel_config();
+ settings.manually_accept_inbound_channels = true;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Note that create_network connects the nodes together for us
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
+ for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+ let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+
+ nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+
+ // If we try to accept a channel from another peer non-0conf, it will fail.
+ let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
+ Err(APIError::APIMisuseError { err }) =>
+ assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
+ _ => panic!(),
+ }
+ }
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // ...however if we accept the same channel 0conf it should work just fine.
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+ }
+
#[cfg(anchors)]
#[test]
fn test_anchors_zero_fee_htlc_tx_fallback() {
_ => panic!("Unexpected event"),
}
- let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
let network = bitcoin::Network::Testnet;
- let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
let mut config: UserConfig = Default::default();
config.channel_handshake_config.minimum_depth = 1;
let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
});
let node_a_holder = NodeHolder { node: &node_a };
let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
});
let node_b_holder = NodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap();
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap();
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![tx],
};
Listen::block_connected(&node_a, &block, 1);
_ => panic!("Unexpected event"),
}
- let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a);
+ let dummy_graph = NetworkGraph::new(network, &logger_a);
let mut payment_count: u64 = 0;
macro_rules! send_payment {
let usable_channels = $node_a.list_usable_channels();
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
.with_features($node_b.invoice_features());
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let seed = [3u8; 32];
let keys_manager = KeysManager::new(&seed, 42, 42);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+ let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
$node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
_ => panic!("Failed to generate claim event"),
}
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+ let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));