diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a56793fa..fe3320a1 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -21,7 +21,7 @@ use bitcoin::blockdata::block::Header;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::key::constants::SECRET_KEY_SIZE;
-use bitcoin::network::constants::Network;
+use bitcoin::network::Network;
 
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -31,8 +31,9 @@ use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::{secp256k1, Sequence};
 
-use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
+use crate::blinded_path::{BlindedPath, NodeIdLookUp};
+use crate::blinded_path::message::ForwardNode;
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -42,8 +43,10 @@ use crate::events;
 use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
-use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; +use crate::ln::inbound_payment; +use crate::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; +use crate::ln::channel_state::ChannelDetails; use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::Bolt11InvoiceFeatures; @@ -55,19 +58,18 @@ use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING}; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; #[cfg(test)] use crate::ln::outbound_payment; -use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration}; +use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration}; use crate::ln::wire::Encode; -use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder}; +use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice}; use crate::offers::invoice_error::InvoiceError; use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder}; -use crate::offers::merkle::SignError; use crate::offers::offer::{Offer, OfferBuilder}; use crate::offers::parse::Bolt12SemanticError; use crate::offers::refund::{Refund, RefundBuilder}; -use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message}; +use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider}; -use crate::sign::ecdsa::WriteableEcdsaChannelSigner; +use crate::sign::ecdsa::EcdsaChannelSigner; use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate}; use crate::util::wakers::{Future, Notifier}; use crate::util::scid_utils::fake_scid; @@ -75,6 +77,7 @@ use crate::util::string::UntrustedString; use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; use crate::util::logger::{Level, Logger, WithContext}; use crate::util::errors::APIError; + #[cfg(not(c_bindings))] use { crate::offers::offer::DerivedMetadata, @@ -102,7 +105,7 @@ use core::time::Duration; use core::ops::Deref; // Re-export this for use in the public API. -pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields}; +pub use crate::ln::outbound_payment::{Bolt12PaymentError, PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields}; use crate::ln::script::ShutdownScript; // We hold various information about HTLC relay in the HTLC objects in Channel itself: @@ -155,6 +158,11 @@ pub enum PendingHTLCRouting { /// [`Event::PaymentClaimable::onion_fields`] as /// [`RecipientOnionFields::payment_metadata`]. 
 		payment_metadata: Option<Vec<u8>>,
+		/// The context of the payment included by the recipient in a blinded path, or `None` if a
+		/// blinded path was not used.
+		///
+		/// Used in part to determine the [`events::PaymentPurpose`].
+		payment_context: Option<PaymentContext>,
 		/// CLTV expiry of the received HTLC.
 		///
 		/// Used to track when we should expire pending HTLCs that go unclaimed.
@@ -199,6 +207,8 @@ pub enum PendingHTLCRouting {
 		/// For HTLCs received by LDK, these will ultimately bubble back up as
 		/// [`RecipientOnionFields::custom_tlvs`].
 		custom_tlvs: Vec<(u64, Vec<u8>)>,
+		/// Set if this HTLC is the final hop in a multi-hop blinded path.
+		requires_blinded_error: bool,
 	},
 }
 
@@ -220,6 +230,7 @@ impl PendingHTLCRouting {
 		match self {
 			Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
 			Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+			Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
 			_ => None,
 		}
 	}
@@ -295,6 +306,7 @@ pub(super) struct PendingAddHTLCInfo {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	prev_short_channel_id: u64,
 	prev_htlc_id: u64,
+	prev_channel_id: ChannelId,
 	prev_funding_outpoint: OutPoint,
 	prev_user_channel_id: u128,
 }
@@ -335,6 +347,7 @@ pub(crate) struct HTLCPreviousHopData {
 	incoming_packet_shared_secret: [u8; 32],
 	phantom_shared_secret: Option<[u8; 32]>,
 	blinded_failure: Option<BlindedFailure>,
+	channel_id: ChannelId,
 
 	// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
 	// channel with a preimage provided by the forward channel.
@@ -375,7 +388,7 @@ struct ClaimableHTLC {
 impl From<&ClaimableHTLC> for events::ClaimedHTLC {
 	fn from(val: &ClaimableHTLC) -> Self {
 		events::ClaimedHTLC {
-			channel_id: val.prev_hop.outpoint.to_channel_id(),
+			channel_id: val.prev_hop.channel_id,
 			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
 			cltv_expiry: val.cltv_expiry,
 			value_msat: val.value,
@@ -623,7 +636,7 @@ impl MsgHandleErrInternal {
 				err: msg,
 				action: msgs::ErrorAction::IgnoreError,
 			},
-			ChannelError::Close(msg) => LightningError {
+			ChannelError::Close((msg, _reason)) => LightningError {
 				err: msg.clone(),
 				action: msgs::ErrorAction::SendErrorMessage {
 					msg: msgs::ErrorMessage {
@@ -668,6 +681,7 @@ struct ClaimingPayment {
 	receiver_node_id: PublicKey,
 	htlcs: Vec<events::ClaimedHTLC>,
 	sender_intended_value: Option<u64>,
+	onion_fields: Option<RecipientOnionFields>,
 }
 impl_writeable_tlv_based!(ClaimingPayment, {
 	(0, amount_msat, required),
@@ -675,6 +689,7 @@ impl_writeable_tlv_based!(ClaimingPayment, {
 	(4, receiver_node_id, required),
 	(5, htlcs, optional_vec),
 	(7, sender_intended_value, option),
+	(9, onion_fields, option),
 });
 
 struct ClaimablePayment {
@@ -714,7 +729,7 @@ enum BackgroundEvent {
 	///
 	/// Note that any such events are lost on shutdown, so in general they must be updates which
 	/// are regenerated on startup.
-	ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+	ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
 	/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
 	/// channel to continue normal operation.
 	///
@@ -728,6 +743,7 @@ enum BackgroundEvent {
 	MonitorUpdateRegeneratedOnStartup {
 		counterparty_node_id: PublicKey,
 		funding_txo: OutPoint,
+		channel_id: ChannelId,
 		update: ChannelMonitorUpdate
 	},
 	/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
@@ -756,7 +772,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// outbound edge.
 	EmitEventAndFreeOtherChannel {
 		event: events::Event,
-		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
 	},
 	/// Indicates we should immediately resume the operation of another channel, unless there is
 	/// some other reason why the channel is blocked. In practice this simply means immediately
@@ -774,6 +790,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 		downstream_counterparty_node_id: PublicKey,
 		downstream_funding_outpoint: OutPoint,
 		blocking_action: RAAMonitorUpdateBlockingAction,
+		downstream_channel_id: ChannelId,
 	},
 }
 
@@ -785,6 +802,9 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 		(0, downstream_counterparty_node_id, required),
 		(2, downstream_funding_outpoint, required),
 		(4, blocking_action, required),
+		// Note that by the time we get past the required read above, downstream_funding_outpoint will be
+		// filled in, so we can safely unwrap it here.
+		(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
 	},
 	(2, EmitEventAndFreeOtherChannel) => {
 		(0, event, upgradable_required),
@@ -802,12 +822,16 @@ pub(crate) enum EventCompletionAction {
 	ReleaseRAAChannelMonitorUpdate {
 		counterparty_node_id: PublicKey,
 		channel_funding_outpoint: OutPoint,
+		channel_id: ChannelId,
 	},
 }
 impl_writeable_tlv_based_enum!(EventCompletionAction,
 	(0, ReleaseRAAChannelMonitorUpdate) => {
 		(0, channel_funding_outpoint, required),
 		(2, counterparty_node_id, required),
+		// Note that by the time we get past the required read above, channel_funding_outpoint will be
+		// filled in, so we can safely unwrap it here.
+		(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
 	};
 );
 
@@ -829,7 +853,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
 impl RAAMonitorUpdateBlockingAction {
 	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
 		Self::ForwardedPaymentInboundClaim {
-			channel_id: prev_hop.outpoint.to_channel_id(),
+			channel_id: prev_hop.channel_id,
 			htlc_id: prev_hop.htlc_id,
 		}
 	}
@@ -888,7 +912,7 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// The peer is currently connected (i.e. we've seen a
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
-	is_connected: bool,
+	pub is_connected: bool,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -899,7 +923,16 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
 		if require_disconnected && self.is_connected {
 			return false
 		}
-		self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+		!self.channel_by_id.iter().any(|(_, phase)|
+			match phase {
+				ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
+				ChannelPhase::UnfundedInboundV1(_) => false,
+				#[cfg(any(dual_funding, splicing))]
+				ChannelPhase::UnfundedOutboundV2(_) => true,
+				#[cfg(any(dual_funding, splicing))]
+				ChannelPhase::UnfundedInboundV2(_) => false,
+			}
+		)
 			&& self.monitor_update_blocked_actions.is_empty()
 			&& self.in_flight_monitor_updates.is_empty()
 	}
@@ -929,6 +962,11 @@ pub(super) struct InboundChannelRequest {
 /// accepted. An unaccepted channel that exceeds this limit will be abandoned.
 const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
 
+/// The number of blocks of historical feerate estimates we keep around and consider when deciding
+/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
+/// after startup before we consider force-closing channels for having too-low fees.
+pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
+
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
 /// actually ours and not some duplicate HTLC sent to us by a node along the route.
 ///
@@ -971,6 +1009,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 		Arc<NetworkGraph<Arc<L>>>,
 		Arc<L>,
+		Arc<KeysManager>,
 		Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
 		ProbabilisticScoringFeeParameters,
 		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
@@ -1001,6 +1040,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
 	&'e DefaultRouter<
 		&'f NetworkGraph<&'g L>,
 		&'g L,
+		&'c KeysManager,
 		&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
 		ProbabilisticScoringFeeParameters,
 		ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
@@ -1029,8 +1069,8 @@ pub trait AChannelManager {
 	type NodeSigner: NodeSigner + ?Sized;
 	/// A type that may be dereferenced to [`Self::NodeSigner`].
 	type NS: Deref<Target = Self::NodeSigner>;
-	/// A type implementing [`WriteableEcdsaChannelSigner`].
-	type Signer: WriteableEcdsaChannelSigner + Sized;
+	/// A type implementing [`EcdsaChannelSigner`].
+	type Signer: EcdsaChannelSigner + Sized;
	/// A type implementing [`SignerProvider`] for [`Self::Signer`].
 	type SignerProvider: SignerProvider<EcdsaSigner = Self::Signer> + ?Sized;
 	/// A type that may be dereferenced to [`Self::SignerProvider`].
 	type SP: Deref<Target = Self::SignerProvider>;
@@ -1083,11 +1123,631 @@ where
 	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
 }
 
-/// Manager which keeps track of a number of channels and sends messages to the appropriate
-/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+/// A lightning node's channel state machine and payment management logic, which facilitates
+/// sending, forwarding, and receiving payments through lightning channels.
+///
+/// [`ChannelManager`] is parameterized by a number of components to achieve this.
+/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each +/// channel +/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and +/// closing channels +/// - [`EntropySource`] for providing random data needed for cryptographic operations +/// - [`NodeSigner`] for cryptographic operations scoped to the node +/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels +/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a +/// timely manner +/// - [`Router`] for finding payment paths when initiating and retrying payments +/// - [`Logger`] for logging operational information of varying degrees +/// +/// Additionally, it implements the following traits: +/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers +/// - [`MessageSendEventsProvider`] to similarly send such messages to peers +/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending +/// - [`EventsProvider`] to generate user-actionable [`Event`]s +/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity +/// +/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an +/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality. +/// +/// # `ChannelManager` vs `ChannelMonitor` +/// +/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of +/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain +/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel +/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized +/// [`chain::Watch`] of them. +/// +/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating +/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors +/// for any pertinent on-chain activity, enforcing claims as needed. +/// +/// This division of off-chain management and on-chain enforcement allows for interesting node +/// setups. For instance, on-chain enforcement could be moved to a separate host or have added +/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface. +/// +/// # Initialization +/// +/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance. +/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and +/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to +/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as +/// detailed in the [`ChannelManagerReadArgs`] documentation. 
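+///
+/// As a minimal orientation before the full example below (a sketch; the network choice is an
+/// assumption, not a requirement): [`ChainParameters`] for a brand-new node can be built directly
+/// from the network's genesis block via [`BestBlock::from_network`], whereas a node that has
+/// already synced should use its actual chain tip instead.
+///
+/// ```
+/// use bitcoin::network::Network;
+/// use lightning::chain::BestBlock;
+/// use lightning::ln::channelmanager::ChainParameters;
+///
+/// // Genesis-based best block: only appropriate for a node with no prior history.
+/// let params = ChainParameters {
+///     network: Network::Regtest,
+///     best_block: BestBlock::from_network(Network::Regtest),
+/// };
+/// ```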
+///
+/// ```
+/// use bitcoin::BlockHash;
+/// use bitcoin::network::Network;
+/// use lightning::chain::BestBlock;
+/// # use lightning::chain::channelmonitor::ChannelMonitor;
+/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
+/// # use lightning::routing::gossip::NetworkGraph;
+/// use lightning::util::config::UserConfig;
+/// use lightning::util::ser::ReadableArgs;
+///
+/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
+/// # fn example<
+/// #     'a,
+/// #     L: lightning::util::logger::Logger,
+/// #     ES: lightning::sign::EntropySource,
+/// #     S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
+/// #     SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
+/// #     SP: Sized,
+/// #     R: lightning::io::Read,
+/// # >(
+/// #     fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
+/// #     chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
+/// #     tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
+/// #     router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
+/// #     logger: &L,
+/// #     entropy_source: &ES,
+/// #     node_signer: &dyn lightning::sign::NodeSigner,
+/// #     signer_provider: &lightning::sign::DynSignerProvider,
+/// #     best_block: lightning::chain::BestBlock,
+/// #     current_timestamp: u32,
+/// #     mut reader: R,
+/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
+/// // Fresh start with no channels
+/// let params = ChainParameters {
+///     network: Network::Bitcoin,
+///     best_block,
+/// };
+/// let default_config = UserConfig::default();
+/// let channel_manager = ChannelManager::new(
+///     fee_estimator, chain_monitor, tx_broadcaster, router, logger, entropy_source, node_signer,
+///     signer_provider, default_config, params, current_timestamp
+/// );
+///
+/// // Restart from deserialized data
+/// let mut channel_monitors = read_channel_monitors();
+/// let args = ChannelManagerReadArgs::new(
+///     entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
+///     router, logger, default_config, channel_monitors.iter_mut().collect()
+/// );
+/// let (block_hash, channel_manager) =
+///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
+///
+/// // Update the ChannelManager and ChannelMonitors with the latest chain data
+/// // ...
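+/// // For example, if driving chain data via `lightning::chain::Listen`, each newly
+/// // connected block might be fed in roughly as follows (a sketch, left commented out;
+/// // `block` and `height` are assumed to come from your block source, and the
+/// // ChannelMonitors must be kept in sync as well -- see `lightning-block-sync`):
+/// // channel_manager.block_connected(&block, height);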
+///
+/// // Move the monitors to the ChannelManager's chain::Watch parameter
+/// for monitor in channel_monitors {
+///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+/// }
+/// # Ok(())
+/// # }
+/// ```
+///
+/// # Operation
+///
+/// The following is required for [`ChannelManager`] to function properly:
+/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
+///   called by [`PeerManager::read_event`] when processing network I/O)
+/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
+///   (typically initiated when [`PeerManager::process_events`] is called)
+/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
+///   as documented by those traits
+/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
+///   every minute
+/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
+///   [`Persister`] such as a [`KVStore`] implementation
+/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
+///
+/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
+/// when the last two requirements need to be checked.
+///
+/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
+/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
+/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
+/// crate. For languages other than Rust, the availability of similar utilities may vary.
+///
+/// # Channels
+///
+/// [`ChannelManager`]'s primary function involves managing channel state. Without channels,
+/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
+/// currently open channels.
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let channels = channel_manager.list_usable_channels();
+/// for details in channels {
+///     println!("{:?}", details);
+/// }
+/// # }
+/// ```
+///
+/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
+/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
+/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
+/// by [`ChannelManager`].
+///
+/// ## Opening Channels
+///
+/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
+/// opening an outbound channel, which requires self-funding when handling
+/// [`Event::FundingGenerationReady`].
+///
+/// ```
+/// # use bitcoin::{ScriptBuf, Transaction};
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # trait Wallet {
+/// #     fn create_funding_transaction(
+/// #         &self, _amount_sats: u64, _output_script: ScriptBuf
+/// #     ) -> Transaction;
+/// # }
+/// #
+/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let value_sats = 1_000_000;
+/// let push_msats = 10_000_000;
+/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
+///     Ok(channel_id) => println!("Opening channel {}", channel_id),
+///     Err(e) => println!("Error opening channel: {:?}", e),
+/// }
+///
+/// // On the event processing thread once the peer has responded
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::FundingGenerationReady {
+///         temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
+///         user_channel_id, ..
+///     } => {
+///         assert_eq!(user_channel_id, 42);
+///         let funding_transaction = wallet.create_funding_transaction(
+///             channel_value_satoshis, output_script
+///         );
+///         match channel_manager.funding_transaction_generated(
+///             &temporary_channel_id, &counterparty_node_id, funding_transaction
+///         ) {
+///             Ok(()) => println!("Funding channel {}", temporary_channel_id),
+///             Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
+///         }
+///     },
+///     Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
+///         assert_eq!(user_channel_id, 42);
+///         println!(
+///             "Channel {} (formerly {}) now pending (funding transaction has been broadcasted)",
+///             channel_id, former_temporary_channel_id.unwrap()
+///         );
+///     },
+///     Event::ChannelReady { channel_id, user_channel_id, .. } => {
+///         assert_eq!(user_channel_id, 42);
+///         println!("Channel {} ready", channel_id);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Accepting Channels
+///
+/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
+/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
+/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
+/// #     // ...
+/// #     unimplemented!()
+/// # }
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let error_message = "Channel force-closed";
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
+///         if !is_trusted(counterparty_node_id) {
+///             match channel_manager.force_close_without_broadcasting_txn(
+///                 &temporary_channel_id, &counterparty_node_id, error_message.to_string()
+///             ) {
+///                 Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
+///                 Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
+///             }
+///             return;
+///         }
+///
+///         let user_channel_id = 43;
+///         match channel_manager.accept_inbound_channel(
+///             &temporary_channel_id, &counterparty_node_id, user_channel_id
+///         ) {
+///             Ok(()) => println!("Accepting channel {}", temporary_channel_id),
+///             Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
+///         }
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Closing Channels
+///
+/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
+/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
+/// lower fees and immediate access to funds. However, the latter may be necessary if the
+/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
+/// once the channel has been closed successfully.
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::types::ChannelId;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
+///     Ok(()) => println!("Closing channel {}", channel_id),
+///     Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
+/// }
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::ChannelClosed { channel_id, user_channel_id, .. } => {
+///         assert_eq!(user_channel_id, 42);
+///         println!("Channel {} closed", channel_id);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Payments
+///
+/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
+/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
+/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
+/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
+/// from the [`PaymentSecret`]. Sending payments, however, requires tracking in order to retry
+/// failed HTLCs.
+///
+/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
+/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
+/// for a payment will be retried according to the payment's [`Retry`] strategy or until
+/// [`abandon_payment`] is called.
+///
+/// ## BOLT 11 Invoices
 ///
-/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
-/// to individual Channels.
+/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. Specifically, use the
+/// functions in its `utils` module for constructing invoices that are compatible with
+/// [`ChannelManager`]. These functions serve as a convenience for building invoices with the
+/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
+/// own [`PaymentHash`], use [`create_inbound_payment_for_hash`] or the corresponding functions in
+/// the [`lightning-invoice`] `utils` module.
+///
+/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
+/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
+/// an [`Event::PaymentClaimed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Or use utils::create_invoice_from_channelmanager
+/// let known_payment_hash = match channel_manager.create_inbound_payment(
+///     Some(10_000_000), 3600, None
+/// ) {
+///     Ok((payment_hash, _payment_secret)) => {
+///         println!("Creating inbound payment {}", payment_hash);
+///         payment_hash
+///     },
+///     Err(()) => panic!("Error creating inbound payment"),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+///         PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
+///             assert_eq!(payment_hash, known_payment_hash);
+///             println!("Claiming payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
+///             println!("Unknown payment hash: {}", payment_hash);
+///         },
+///         PaymentPurpose::SpontaneousPayment(payment_preimage) => {
+///             assert_ne!(payment_hash, known_payment_hash);
+///             println!("Claiming spontaneous payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         // ...
+/// #         _ => {},
+///     },
+///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         assert_eq!(payment_hash, known_payment_hash);
+///         println!("Claimed {} msats", amount_msat);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// For paying an invoice, [`lightning-invoice`] provides a `payment` module with convenience
+/// functions for use with [`send_payment`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::types::PaymentHash;
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
+/// # use lightning::routing::router::RouteParameters;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+/// #     route_params: RouteParameters, retry: Retry
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // let (payment_hash, recipient_onion, route_params) =
+/// //     payment::payment_parameters_from_invoice(&invoice);
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.send_payment(
+///     payment_hash, recipient_onion, payment_id, route_params, retry
+/// ) {
+///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
+///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
+/// }
+///
+/// let expected_payment_id = payment_id;
+/// let expected_payment_hash = payment_hash;
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::Pending {
+///             payment_id: expected_payment_id,
+///             payment_hash: expected_payment_hash,
+///             ..
+///         }
+///     )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
+///     Event::PaymentFailed { payment_hash, .. } => println!("Failed paying {}", payment_hash),
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Offers
+///
+/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
+/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
+/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
+/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
+/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
+/// stateless just as BOLT 11 invoices are.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let absolute_expiry = None;
+/// let offer = channel_manager
+///     .create_offer_builder(absolute_expiry)?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
+/// # let offer = builder
+///     .description("coffee".to_string())
+///     .amount_msats(10_000_000)
+///     .build()?;
+/// let bech32_offer = offer.to_string();
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+///         PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
+///             println!("Claiming payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
+///             println!("Unknown payment hash: {}", payment_hash);
+///         },
+///         // ...
+/// #         _ => {},
+///     },
+///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         println!("Claimed {} msats", amount_msat);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
+/// and pays the [`Bolt12Invoice`] response. In addition to success and failure events,
+/// [`ChannelManager`] may also generate an [`Event::InvoiceRequestFailed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::offer::Offer;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
+/// #     payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.pay_for_offer(
+///     offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
+/// ) {
+///     Ok(()) => println!("Requesting invoice for offer"),
+///     Err(e) => println!("Unable to request invoice for offer: {:?}", e),
+/// }
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
+///     )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, .. }
+///     )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+///     Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+///     Event::InvoiceRequestFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Refunds
+///
+/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
+/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
+/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
+/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
+///
+/// ```
+/// # use core::time::Duration;
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
+/// #     max_total_routing_fee_msat: Option<u64>
+/// # ) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// let refund = channel_manager
+///     .create_refund_builder(
+///         amount_msats, absolute_expiry, payment_id, retry, max_total_routing_fee_msat
+///     )?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
+/// # let refund = builder
+///     .description("coffee".to_string())
+///     .payer_note("refund for order 1234".to_string())
+///     .build()?;
+/// let bech32_refund = refund.to_string();
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
+///     )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, .. }
+///     )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+///     Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+///     // ...
+/// #     _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
+/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::refund::Refund;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
+///     Ok(invoice) => {
+///         let payment_hash = invoice.payment_hash();
+///         println!("Requesting refund payment {}", payment_hash);
+///         payment_hash
+///     },
+///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
+///             assert_eq!(payment_hash, known_payment_hash);
+///             println!("Claiming payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
+///             println!("Unknown payment hash: {}", payment_hash);
+///         },
+///         // ...
+/// #         _ => {},
+///     },
+///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         assert_eq!(payment_hash, known_payment_hash);
+///         println!("Claimed {} msats", amount_msat);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Persistence
 ///
 /// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
 /// all peers during write/read (though does not modify this instance, only the instance being
@@ -1108,12 +1768,16 @@ where
 /// tells you the last block hash which was connected. You should get the best block tip before using the manager.
 /// See [`chain::Listen`] and [`chain::Confirm`] for more details.
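+///
+/// As a minimal sketch of the write side (an illustration only; a real node would typically
+/// persist via a [`Persister`]/[`KVStore`] implementation driven by
+/// [`lightning-background-processor`] rather than by hand):
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// use lightning::util::ser::Writeable;
+///
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Serialize the entire ChannelManager to bytes, ready to be written to durable storage.
+/// let bytes = channel_manager.encode();
+/// # let _ = bytes;
+/// # }
+/// ```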
 ///
+/// # `ChannelUpdate` Messages
+///
 /// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
 /// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
 /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
 /// offline for a full minute. In order to track this, you must call
 /// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
 ///
+/// # DoS Mitigation
+///
 /// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
 /// inbound channels without confirmed funding transactions. This may result in nodes which we do
 /// not have a channel with being unable to connect to us or open new channels with us if we have
@@ -1123,19 +1787,53 @@ where
 /// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
 /// never limited. Please ensure you limit the count of such channels yourself.
 ///
+/// # Type Aliases
+///
 /// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
 /// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
 /// essentially you should default to using a [`SimpleRefChannelManager`], and use a
 /// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
 /// you're using lightning-net-tokio.
 ///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
+/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
+/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
+/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
+/// [`Persister`]: crate::util::persist::Persister
+/// [`KVStore`]: crate::util::persist::KVStore
+/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
+/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
+/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
+/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
+/// [`list_channels`]: Self::list_channels
+/// [`list_usable_channels`]: Self::list_usable_channels
+/// [`create_channel`]: Self::create_channel
+/// [`close_channel`]: Self::close_channel
+/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
+/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
+/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
+/// [`list_recent_payments`]: Self::list_recent_payments
+/// [`abandon_payment`]: Self::abandon_payment
+/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
+/// [`create_inbound_payment`]: Self::create_inbound_payment
+/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+/// [`claim_funds`]: Self::claim_funds
+/// [`send_payment`]: Self::send_payment
+/// [`offers`]: crate::offers
+/// [`create_offer_builder`]: Self::create_offer_builder
+/// [`pay_for_offer`]: Self::pay_for_offer
+/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+/// [`create_refund_builder`]: Self::create_refund_builder
+/// [`request_refund_payment`]: Self::request_refund_payment
 /// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
 /// [`funding_created`]: msgs::FundingCreated
 /// [`funding_transaction_generated`]: Self::funding_transaction_generated
 /// [`BlockHash`]: bitcoin::hash_types::BlockHash
 /// [`update_channel`]: chain::Watch::update_channel
 /// [`ChannelUpdate`]: msgs::ChannelUpdate
-/// [`timer_tick_occurred`]: Self::timer_tick_occurred
 /// [`read`]: ReadableArgs::read
 //
 // Lock order:
@@ -1155,6 +1853,8 @@ where
 // |   |
 // |   |__`pending_intercepted_htlcs`
 // |
+// |__`decode_update_add_htlcs`
+// |
 // |__`per_peer_state`
 // |
 // |__`pending_inbound_payments`
@@ -1245,6 +1945,18 @@ where
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
 
+	/// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
+	///
+	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+	/// and via the classic SCID.
+	///
+	/// Note that no consistency guarantees are made about the existence of a channel with the
+	/// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
+	///
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
+	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
+
 	/// The sets of payments which are claimable or currently being claimed. See
 	/// [`ClaimablePayments`]' individual field docs for more info.
 	///
@@ -1388,6 +2100,24 @@ where
 	pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
 
+	/// Tracks the message events that are to be broadcasted when we are connected to some peer.
+	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+
+	/// We only want to force-close our channels on peers based on stale feerates when we're
+	/// confident the feerate on the channel is *really* stale, not just became stale recently.
+	/// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
+	/// (after startup completed) here, and only force-close when channels have a lower feerate
+	/// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
+	///
+	/// We only keep this in memory as we assume any feerates we receive immediately after startup
+	/// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
+	/// actions for a day anyway.
+	///
+	/// The first element in the pair is the
+	/// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
+	/// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
+	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
+
 	entropy_source: ES,
 	node_signer: NS,
 	signer_provider: SP,
@@ -1578,330 +2308,18 @@ const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
 /// many peers we reject new (inbound) connections.
 const MAX_NO_CHANNEL_PEERS: usize = 250;
 
-/// Information needed for constructing an invoice route hint for this channel.
-#[derive(Clone, Debug, PartialEq)]
-pub struct CounterpartyForwardingInfo {
-	/// Base routing fee in millisatoshis.
-	pub fee_base_msat: u32,
-	/// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
-	pub fee_proportional_millionths: u32,
-	/// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart,
-	/// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
-	/// `cltv_expiry_delta` for more details.
-	pub cltv_expiry_delta: u16,
-}
-
-/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
-/// to better separate parameters.
-#[derive(Clone, Debug, PartialEq)]
-pub struct ChannelCounterparty {
-	/// The node_id of our counterparty
-	pub node_id: PublicKey,
-	/// The Features the channel counterparty provided upon last connection.
-	/// Useful for routing as it is the most up-to-date copy of the counterparty's features and
-	/// many routing-relevant features are present in the init context.
-	pub features: InitFeatures,
-	/// The value, in satoshis, that must always be held in the channel for our counterparty. This
-	/// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
-	/// claiming at least this value on chain.
-	///
-	/// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
-	///
-	/// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
-	pub unspendable_punishment_reserve: u64,
-	/// Information on the fees and requirements that the counterparty requires when forwarding
-	/// payments to us through this channel.
-	pub forwarding_info: Option<CounterpartyForwardingInfo>,
-	/// The smallest value HTLC (in msat) the remote peer will accept, for this channel. This field
-	/// is only `None` before we have received either the `OpenChannel` or `AcceptChannel` message
-	/// from the remote peer, or for `ChannelCounterparty` objects serialized prior to LDK 0.0.107.
-	pub outbound_htlc_minimum_msat: Option<u64>,
-	/// The largest value HTLC (in msat) the remote peer currently will accept, for this channel.
-	pub outbound_htlc_maximum_msat: Option<u64>,
-}
-
-/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
-#[derive(Clone, Debug, PartialEq)]
-pub struct ChannelDetails {
-	/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
-	/// thereafter this is the txid of the funding transaction xor the funding transaction output).
-	/// Note that this means this value is *not* persistent - it can change once during the
-	/// lifetime of the channel.
-	pub channel_id: ChannelId,
-	/// Parameters which apply to our counterparty. See individual fields for more information.
-	pub counterparty: ChannelCounterparty,
-	/// The Channel's funding transaction output, if we've negotiated the funding transaction with
-	/// our counterparty already.
-	///
-	/// Note that, if this has been set, `channel_id` will be equivalent to
-	/// `funding_txo.unwrap().to_channel_id()`.
-	pub funding_txo: Option<OutPoint>,
-	/// The features which this channel operates with. See individual features for more info.
-	///
-	/// `None` until negotiation completes and the channel type is finalized.
-	pub channel_type: Option<ChannelTypeFeatures>,
-	/// The position of the funding transaction in the chain. None if the funding transaction has
-	/// not yet been confirmed and the channel fully opened.
-	///
-	/// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
-	/// payments instead of this. See [`get_inbound_payment_scid`].
-	///
-	/// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
-	/// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
-	///
-	/// [`inbound_scid_alias`]: Self::inbound_scid_alias
-	/// [`outbound_scid_alias`]: Self::outbound_scid_alias
-	/// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
-	/// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
-	/// [`confirmations_required`]: Self::confirmations_required
-	pub short_channel_id: Option<u64>,
-	/// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
-	/// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
-	/// the channel has not yet been confirmed (as long as [`confirmations_required`] is
-	/// `Some(0)`).
-	///
-	/// This will be `None` as long as the channel is not available for routing outbound payments.
-	///
-	/// [`short_channel_id`]: Self::short_channel_id
-	/// [`confirmations_required`]: Self::confirmations_required
-	pub outbound_scid_alias: Option<u64>,
-	/// An optional [`short_channel_id`] alias for this channel, randomly generated by our
-	/// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
-	/// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
-	/// when they see a payment to be routed to us.
-	///
-	/// Our counterparty may choose to rotate this value at any time, though will always recognize
-	/// previous values for inbound payment forwarding.
-	///
-	/// [`short_channel_id`]: Self::short_channel_id
-	pub inbound_scid_alias: Option<u64>,
-	/// The value, in satoshis, of this channel as appears in the funding output
-	pub channel_value_satoshis: u64,
-	/// The value, in satoshis, that must always be held in the channel for us. This value ensures
-	/// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
-	/// this value on chain.
-	///
-	/// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
-	///
-	/// This value will be `None` for outbound channels until the counterparty accepts the channel.
-	///
-	/// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
-	pub unspendable_punishment_reserve: Option<u64>,
-	/// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
-	/// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
-	/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
-	/// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects
-	/// serialized with LDK versions prior to 0.0.113.
-	///
-	/// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
-	/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-	/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
-	pub user_channel_id: u128,
-	/// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
-	/// which is applied to commitment and HTLC transactions.
-	///
-	/// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
-	pub feerate_sat_per_1000_weight: Option<u32>,
-	/// Our total balance. This is the amount we would get if we close the channel.
-	/// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
-	/// amount is not likely to be recoverable on close.
-	///
-	/// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
-	/// balance is not available for inclusion in new outbound HTLCs). This further does not include
-	/// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
-	/// This does not consider any on-chain fees.
-	///
-	/// See also [`ChannelDetails::outbound_capacity_msat`]
-	pub balance_msat: u64,
-	/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
-	/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
-	/// available for inclusion in new outbound HTLCs). This further does not include any pending
-	/// outgoing HTLCs which are awaiting some other resolution to be sent.
-	///
-	/// See also [`ChannelDetails::balance_msat`]
-	///
-	/// This value is not exact. Due to various in-flight changes, feerate changes, and our
-	/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
-	/// should be able to spend nearly this amount.
-	pub outbound_capacity_msat: u64,
-	/// The available outbound capacity for sending a single HTLC to the remote peer. This is
-	/// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
-	/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
-	/// to use a limit as close as possible to the HTLC limit we can currently send.
-	///
-	/// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
-	/// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
-	pub next_outbound_htlc_limit_msat: u64,
-	/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
-	/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
-	/// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
-	/// route which is valid.
-	pub next_outbound_htlc_minimum_msat: u64,
-	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
-	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
-	/// available for inclusion in new inbound HTLCs).
-	/// Note that there are some corner cases not fully handled here, so the actual available
-	/// inbound capacity may be slightly higher than this.
-	///
-	/// This value is not exact. Due to various in-flight changes, feerate changes, and our
-	/// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
-	/// However, our counterparty should be able to spend nearly this amount.
-	pub inbound_capacity_msat: u64,
-	/// The number of required confirmations on the funding transaction before the funding will be
-	/// considered "locked". This number is selected by the channel fundee (i.e. us if
-	/// [`is_outbound`] is *not* set), and can be selected for inbound channels with
-	/// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
-	/// [`ChannelHandshakeLimits::max_minimum_depth`].
-	///
-	/// This value will be `None` for outbound channels until the counterparty accepts the channel.
- /// - /// [`is_outbound`]: ChannelDetails::is_outbound - /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth - /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth - pub confirmations_required: Option, - /// The current number of confirmations on the funding transaction. - /// - /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113. - pub confirmations: Option, - /// The number of blocks (after our commitment transaction confirms) that we will need to wait - /// until we can claim our funds after we force-close the channel. During this time our - /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty - /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any - /// time to claim our non-HTLC-encumbered funds. - /// - /// This value will be `None` for outbound channels until the counterparty accepts the channel. - pub force_close_spend_delay: Option, - /// True if the channel was initiated (and thus funded) by us. - pub is_outbound: bool, - /// True if the channel is confirmed, channel_ready messages have been exchanged, and the - /// channel is not currently being shut down. `channel_ready` message exchange implies the - /// required confirmation count has been reached (and we were connected to the peer at some - /// point after the funding transaction received enough confirmations). The required - /// confirmation count is provided in [`confirmations_required`]. - /// - /// [`confirmations_required`]: ChannelDetails::confirmations_required - pub is_channel_ready: bool, - /// The stage of the channel's shutdown. - /// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116. - pub channel_shutdown_state: Option, - /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b) - /// the peer is connected, and (c) the channel is not currently negotiating a shutdown. - /// - /// This is a strict superset of `is_channel_ready`. - pub is_usable: bool, - /// True if this channel is (or will be) publicly-announced. - pub is_public: bool, - /// The smallest value HTLC (in msat) we will accept, for this channel. This field - /// is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.107 - pub inbound_htlc_minimum_msat: Option, - /// The largest value HTLC (in msat) we currently will accept, for this channel. - pub inbound_htlc_maximum_msat: Option, - /// Set of configurable parameters that affect channel operation. - /// - /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109. - pub config: Option, -} - -impl ChannelDetails { - /// Gets the current SCID which should be used to identify this channel for inbound payments. - /// This should be used for providing invoice hints or in any other context where our - /// counterparty will forward a payment to us. - /// - /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the - /// [`ChannelDetails::short_channel_id`]. See those for more information. - pub fn get_inbound_payment_scid(&self) -> Option { - self.inbound_scid_alias.or(self.short_channel_id) - } - - /// Gets the current SCID which should be used to identify this channel for outbound payments. 
- /// This should be used in [`Route`]s to describe the first hop or in other contexts where - /// we're sending or forwarding a payment outbound over this channel. - /// - /// This is either the [`ChannelDetails::short_channel_id`], if set, or the - /// [`ChannelDetails::outbound_scid_alias`]. See those for more information. - pub fn get_outbound_payment_scid(&self) -> Option { - self.short_channel_id.or(self.outbound_scid_alias) - } - - fn from_channel_context( - context: &ChannelContext, best_block_height: u32, latest_features: InitFeatures, - fee_estimator: &LowerBoundedFeeEstimator - ) -> Self - where - SP::Target: SignerProvider, - F::Target: FeeEstimator - { - let balance = context.get_available_balances(fee_estimator); - let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - context.get_holder_counterparty_selected_channel_reserve_satoshis(); - ChannelDetails { - channel_id: context.channel_id(), - counterparty: ChannelCounterparty { - node_id: context.get_counterparty_node_id(), - features: latest_features, - unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: context.counterparty_forwarding_info(), - // Ensures that we have actually received the `htlc_minimum_msat` value - // from the counterparty through the `OpenChannel` or `AcceptChannel` - // message (as they are always the first message from the counterparty). - // Else `Channel::get_counterparty_htlc_minimum_msat` could return the - // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if context.have_received_message() { - Some(context.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(), - }, - funding_txo: context.get_funding_txo(), - // Note that accept_channel (or open_channel) is always the first message, so - // `have_received_message` indicates that type negotiation has completed. - channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None }, - short_channel_id: context.get_short_channel_id(), - outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None }, - inbound_scid_alias: context.latest_inbound_scid_alias(), - channel_value_satoshis: context.get_value_satoshis(), - feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()), - unspendable_punishment_reserve: to_self_reserve_satoshis, - balance_msat: balance.balance_msat, - inbound_capacity_msat: balance.inbound_capacity_msat, - outbound_capacity_msat: balance.outbound_capacity_msat, - next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat, - user_channel_id: context.get_user_id(), - confirmations_required: context.minimum_depth(), - confirmations: Some(context.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: context.get_counterparty_selected_contest_delay(), - is_outbound: context.is_outbound(), - is_channel_ready: context.is_usable(), - is_usable: context.is_live(), - is_public: context.should_announce(), - inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(), - config: Some(context.config()), - channel_shutdown_state: Some(context.shutdown_state()), - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -/// Further information on the details of the channel shutdown. -/// Upon channels being forced closed (i.e. 
commitment transaction confirmation detected
-/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
-/// the channel will be removed shortly.
-/// Also note, that in normal operation, peers could disconnect at any of these states
-/// and require peer re-connection before making progress onto other states
-pub enum ChannelShutdownState {
-	/// Channel has not sent or received a shutdown message.
-	NotShuttingDown,
-	/// Local node has sent a shutdown message for this channel.
-	ShutdownInitiated,
-	/// Shutdown message exchanges have concluded and the channels are in the midst of
-	/// resolving all existing open HTLCs before closing can continue.
-	ResolvingHTLCs,
-	/// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates.
-	NegotiatingClosingFee,
-	/// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about
-	/// to drop the channel.
-	ShutdownComplete,
-}
+/// The maximum expiration from the current time where an [`Offer`] or [`Refund`] is considered
+/// short-lived, while anything with a greater expiration is considered long-lived.
+///
+/// Using [`ChannelManager::create_offer_builder`] or [`ChannelManager::create_refund_builder`]
+/// will include a [`BlindedPath`] created using:
+/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
+/// - [`MessageRouter::create_blinded_paths`] when long-lived.
+///
+/// Using compact [`BlindedPath`]s may provide better privacy as the [`MessageRouter`] could select
+/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
+/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
+pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
 
 /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
@@ -1972,19 +2390,20 @@ macro_rules! handle_error {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
-				let mut msg_events = Vec::with_capacity(2);
+				let mut msg_event = None;
 				if let Some((shutdown_res, update_option)) = shutdown_finish {
 					let counterparty_node_id = shutdown_res.counterparty_node_id;
 					let channel_id = shutdown_res.channel_id;
 					let logger = WithContext::from(
-						&$self.logger, Some(counterparty_node_id), Some(channel_id),
+						&$self.logger, Some(counterparty_node_id), Some(channel_id), None
 					);
 					log_error!(logger, "Force-closing channel: {}", err.err);
 					$self.finish_close_channel(shutdown_res);
 					if let Some(update) = update_option {
-						msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+						let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
+						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
 							msg: update
 						});
 					}
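
Editor's sketch, not part of the patch: the short-lived vs. long-lived split governed by `MAX_SHORT_LIVED_RELATIVE_EXPIRY` above reduces to a relative-expiry comparison. A minimal illustration, assuming `absolute_expiry` (as carried by an offer or refund) and `now` are both `Duration`s since the Unix epoch:

    use core::time::Duration;

    // Hypothetical helper: an offer with no absolute expiry never becomes
    // invalid, so only a bounded expiry can qualify as short-lived.
    fn is_short_lived(absolute_expiry: Option<Duration>, now: Duration) -> bool {
        absolute_expiry
            .map(|expiry| expiry.saturating_sub(now) <= MAX_SHORT_LIVED_RELATIVE_EXPIRY)
            .unwrap_or(false)
    }

@@ -1994,17 +2413,17 @@ macro_rules!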
handle_error { if let msgs::ErrorAction::IgnoreError = err.action { } else { - msg_events.push(events::MessageSendEvent::HandleError { + msg_event = Some(events::MessageSendEvent::HandleError { node_id: $counterparty_node_id, action: err.action.clone() }); } - if !msg_events.is_empty() { + if let Some(msg_event) = msg_event { let per_peer_state = $self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); - peer_state.pending_msg_events.append(&mut msg_events); + peer_state.pending_msg_events.push(msg_event); } } @@ -2047,11 +2466,10 @@ macro_rules! convert_chan_phase_err { ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id)) }, - ChannelError::Close(msg) => { - let logger = WithChannelContext::from(&$self.logger, &$channel.context); + ChannelError::Close((msg, reason)) => { + let logger = WithChannelContext::from(&$self.logger, &$channel.context, None); log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg); update_maps_on_chan_removal!($self, $channel.context); - let reason = ClosureReason::ProcessingError { err: msg.clone() }; let shutdown_res = $channel.context.force_shutdown(true, reason); let err = MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update); @@ -2076,6 +2494,14 @@ macro_rules! convert_chan_phase_err { ChannelPhase::UnfundedInboundV1(channel) => { convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL) }, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(channel) => { + convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL) + }, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(channel) => { + convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL) + }, } }; } @@ -2151,6 +2577,7 @@ macro_rules! emit_channel_pending_event { counterparty_node_id: $channel.context.get_counterparty_node_id(), user_channel_id: $channel.context.get_user_id(), funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(), + channel_type: Some($channel.context.get_channel_type().clone()), }, None)); $channel.context.set_channel_pending_event_emitted(); } @@ -2174,10 +2601,10 @@ macro_rules! emit_channel_ready_event { macro_rules! handle_monitor_update_completion { ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { - let logger = WithChannelContext::from(&$self.logger, &$chan.context); + let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); let mut updates = $chan.monitor_updating_restored(&&logger, &$self.node_signer, $self.chain_hash, &$self.default_configuration, - $self.best_block.read().unwrap().height()); + $self.best_block.read().unwrap().height); let counterparty_node_id = $chan.context.get_counterparty_node_id(); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a @@ -2196,9 +2623,9 @@ macro_rules! 
handle_monitor_update_completion { let update_actions = $peer_state.monitor_update_blocked_actions .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); - let htlc_forwards = $self.handle_channel_resumption( + let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, - updates.commitment_update, updates.order, updates.accepted_htlcs, + updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); if let Some(upd) = channel_update { @@ -2259,6 +2686,9 @@ macro_rules! handle_monitor_update_completion { if let Some(forwards) = htlc_forwards { $self.forward_htlcs(&mut [forwards][..]); } + if let Some(decode) = decode_update_add_htlcs { + $self.push_decode_update_add_htlcs(decode); + } $self.finalize_claims(updates.finalized_claimed_htlcs); for failure in updates.failed_htlcs.drain(..) { let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; @@ -2270,7 +2700,7 @@ macro_rules! handle_monitor_update_completion { macro_rules! handle_new_monitor_update { ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { { debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire)); - let logger = WithChannelContext::from(&$self.logger, &$chan.context); + let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); match $update_res { ChannelMonitorUpdateStatus::UnrecoverableError => { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; @@ -2431,14 +2861,15 @@ where best_block: RwLock::new(params.best_block), - outbound_scid_aliases: Mutex::new(HashSet::new()), - pending_inbound_payments: Mutex::new(HashMap::new()), + outbound_scid_aliases: Mutex::new(new_hash_set()), + pending_inbound_payments: Mutex::new(new_hash_map()), pending_outbound_payments: OutboundPayments::new(), - forward_htlcs: Mutex::new(HashMap::new()), - claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }), - pending_intercepted_htlcs: Mutex::new(HashMap::new()), - outpoint_to_peer: Mutex::new(HashMap::new()), - short_to_chan_info: FairRwLock::new(HashMap::new()), + forward_htlcs: Mutex::new(new_hash_map()), + decode_update_add_htlcs: Mutex::new(new_hash_map()), + claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), + outpoint_to_peer: Mutex::new(new_hash_map()), + short_to_chan_info: FairRwLock::new(new_hash_map()), our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(), secp_ctx, @@ -2450,7 +2881,7 @@ where highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize), - per_peer_state: FairRwLock::new(HashMap::new()), + per_peer_state: FairRwLock::new(new_hash_map()), pending_events: Mutex::new(VecDeque::new()), pending_events_processor: AtomicBool::new(false), @@ -2462,6 +2893,9 @@ where funding_batch_states: Mutex::new(BTreeMap::new()), pending_offers_messages: Mutex::new(Vec::new()), + pending_broadcast_messages: Mutex::new(Vec::new()), + + last_days_feerates: Mutex::new(VecDeque::new()), entropy_source, node_signer, @@ -2477,7 +2911,7 @@ where } fn create_and_insert_outbound_scid_alias(&self) -> u64 { - let 
height = self.best_block.read().unwrap().height(); + let height = self.best_block.read().unwrap().height; let mut outbound_scid_alias = 0; let mut i = 0; loop { @@ -2555,7 +2989,7 @@ where let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id) + self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id) { Ok(res) => res, Err(e) => { @@ -2594,7 +3028,7 @@ where // the same channel. let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len()); { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -2627,7 +3061,7 @@ where // the same channel. let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len()); { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -2657,7 +3091,7 @@ where /// Gets the list of channels we have with a given counterparty, in random order. pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { @@ -2754,7 +3188,7 @@ where } } else { let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); - shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed)); + shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) })); } }, hash_map::Entry::Vacant(_) => { @@ -2849,7 +3283,7 @@ where } let logger = WithContext::from( - &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), + &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None ); log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail", @@ -2860,7 +3294,7 @@ where let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update { + if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { // There isn't anything we can do if we get an update failure - we're already // force-closing. The monitor update on the required in-memory copy should broadcast // the latest local state, which is the best we can do anyway. 
Thus, it is safe to @@ -2923,9 +3357,9 @@ where let closure_reason = if let Some(peer_msg) = peer_msg { ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) } } else { - ClosureReason::HolderForceClosed + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) } }; - let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id)); + let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) { log_error!(logger, "Force-closing channel {}", channel_id); let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); @@ -2941,6 +3375,13 @@ where // Unfunded channel has no update (None, chan_phase.context().get_counterparty_node_id()) }, + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => { + self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason)); + // Unfunded channel has no update + (None, chan_phase.context().get_counterparty_node_id()) + }, } } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { log_error!(logger, "Force-closing channel {}", &channel_id); @@ -2953,24 +3394,21 @@ where } }; if let Some(update) = update_opt { - // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if - // not try to broadcast it via whatever peer we have. - let per_peer_state = self.per_peer_state.read().unwrap(); - let a_peer_state_opt = per_peer_state.get(peer_node_id) - .ok_or(per_peer_state.values().next()); - if let Ok(a_peer_state_mutex) = a_peer_state_opt { - let mut a_peer_state = a_peer_state_mutex.lock().unwrap(); - a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } + // If we have some Channel Update to broadcast, we cache it and broadcast it later. 
+				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+				pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+					msg: update
+				});
 		}
 		Ok(counterparty_node_id)
 	}
 
-	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
+	-> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+		log_debug!(self.logger,
+			"Force-closing channel. The error message sent to the peer: {}", error_message);
 		match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
 			Ok(counterparty_node_id) => {
 				let per_peer_state = self.per_peer_state.read().unwrap();
@@ -2979,8 +3417,8 @@ where
 					peer_state.pending_msg_events.push(
 						events::MessageSendEvent::HandleError {
 							node_id: counterparty_node_id,
-							action: msgs::ErrorAction::DisconnectPeer {
-								msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
+							action: msgs::ErrorAction::SendErrorMessage {
+								msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
 							},
 						}
 					);
@@ -2991,40 +3429,211 @@ where
 		}
 	}
 
-	/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
-	/// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
-	/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
-	/// channel.
-	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+	/// Force closes a channel, immediately broadcasting the latest local transaction(s),
+	/// rejecting new HTLCs.
+	///
+	/// The provided `error_message` is sent to connected peers for closing
+	/// channels and should be a human-readable description of what went wrong.
+	///
+	/// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
+	/// isn't the counterparty of the corresponding channel.
+	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
 	-> Result<(), APIError> {
-		self.force_close_sending_error(channel_id, counterparty_node_id, true)
+		self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
 	}
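
Usage sketch, not part of the patch (`channel_manager`, `channel_id`, and `counterparty_node_id` are assumed to come from the surrounding application). The new `error_message` argument is simply the text the peer will see in the resulting `error` message:

    // Force-close, broadcasting our latest commitment transaction, and tell
    // the peer why in a human-readable error message.
    if let Err(e) = channel_manager.force_close_broadcasting_latest_txn(
        &channel_id,
        &counterparty_node_id,
        "Exceeded our configured risk limits".to_string(),
    ) {
        eprintln!("force-close failed: {:?}", e);
    }

 	/// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
-	/// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
-	/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+	/// the latest local transaction(s).
 	///
-	/// You can always get the latest local transaction(s) to broadcast from
-	/// [`ChannelMonitor::get_latest_holder_commitment_txn`].
-	pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+	/// The provided `error_message` is sent to connected peers for closing channels and should
+	/// be a human-readable description of what went wrong.
+	///
+	/// Fails if `channel_id` is unknown to the manager, or if the
+	/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+	/// You can always broadcast the latest local transaction(s) via
+	/// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].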
+	pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
 	-> Result<(), APIError> {
-		self.force_close_sending_error(channel_id, counterparty_node_id, false)
+		self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
 	}
 
 	/// Force close all channels, immediately broadcasting the latest local commitment transaction
 	/// for each to the chain and rejecting new HTLCs on each.
-	pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+	///
+	/// The provided `error_message` is sent to connected peers for closing channels and should
+	/// be a human-readable description of what went wrong.
+	pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
 		for chan in self.list_channels() {
-			let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+			let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
 		}
 	}
 
 	/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
 	/// local transaction(s).
-	pub fn force_close_all_channels_without_broadcasting_txn(&self) {
+	///
+	/// The provided `error_message` is sent to connected peers for closing channels and
+	/// should be a human-readable description of what went wrong.
+	pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
 		for chan in self.list_channels() {
-			let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
+			let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
+		}
+	}
+
+	fn can_forward_htlc_to_outgoing_channel(
+		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
+	) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+			// Note that the behavior here should be identical to the above block - we
+			// should NOT reveal the existence or non-existence of a private channel if
+			// we don't allow forwards outbound over them.
+			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+		}
+		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
+			// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+			// "refuse to forward unless the SCID alias was used", so we pretend
+			// we don't have the channel here.
+			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+		}
+
+		// Note that we could technically not return an error yet here and just hope
+		// that the connection is reestablished or monitor updated by the time we get
+		// around to doing the actual forward, but better to fail early if we can and
+		// hopefully an attacker trying to path-trace payments cannot make this occur
+		// on a small/per-node/per-channel scale.
+		if !chan.context.is_live() { // channel_disabled
+			// If the channel_update we're going to return is disabled (i.e. the
+			// peer has been disabled for some time), return `channel_disabled`,
+			// otherwise return `temporary_channel_failure`.
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+			} else {
+				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+			}
+		}
+		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+		}
+		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			return Err((err, code, chan_update_opt));
+		}
+
+		Ok(())
+	}
+
+	/// Executes a callback `C` that returns some value `X` on the channel found with the given
+	/// `scid`. `None` is returned when the channel is not found.
+	fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
+		&self, scid: u64, callback: C,
+	) -> Option<X> {
+		let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
+			None => return None,
+			Some((cp_id, id)) => (cp_id, id),
+		};
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		if peer_state_mutex_opt.is_none() {
+			return None;
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.get_mut(&channel_id).and_then(
+			|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+		) {
+			None => None,
+			Some(chan) => Some(callback(chan)),
+		}
+	}
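
Illustrative usage, not from the patch: `do_funded_channel_callback` lets a caller run a one-off read or check against whichever funded channel an SCID currently maps to. A hypothetical read from inside the same impl might look like:

    // Look up the counterparty of the channel behind `scid`, getting `None`
    // back if the SCID no longer maps to a funded channel.
    let counterparty: Option<PublicKey> = self.do_funded_channel_callback(
        scid,
        |chan: &mut Channel<SP>| chan.context.get_counterparty_node_id(),
    );

+
+	fn can_forward_htlc(
+		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
+	) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
+		}) {
+			Some(Ok(())) => {},
+			Some(Err(e)) => return Err(e),
+			None => {
+				// If we couldn't find the channel info for the scid, it may be a phantom or
+				// intercept forward.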
+				if (self.default_configuration.accept_intercept_htlcs &&
+					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
+					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
+				{} else {
+					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+				}
+			}
+		}
+
+		let cur_height = self.best_block.read().unwrap().height + 1;
+		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
+			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
+		) {
+			let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+				self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
+			}).flatten();
+			return Err((err_msg, err_code, chan_update_opt));
+		}
+
+		Ok(())
+	}
+
+	fn htlc_failure_from_update_add_err(
+		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
+		mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
+		shared_secret: &[u8; 32]
+	) -> HTLCFailureMsg {
+		let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+		if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
+			let chan_update = chan_update.unwrap();
+			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
+				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+			}
+			else if err_code == 0x1000 | 13 {
+				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+			}
+			else if err_code == 0x1000 | 20 {
+				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+				0u16.write(&mut res).expect("Writes cannot fail");
+			}
+			(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+			msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+			chan_update.write(&mut res).expect("Writes cannot fail");
+		} else if err_code & 0x1000 == 0x1000 {
+			// If we're trying to return an error that requires a `channel_update` but
+			// we're forwarding to a phantom or intercept "channel" (i.e. cannot
+			// generate an update), just use the generic "temporary_node_failure"
+			// instead.
+			err_code = 0x2000 | 2;
+		}
+
+		log_info!(
+			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
+			"Failed to accept/forward incoming HTLC: {}", err_msg
+		);
+		// If `msg.blinding_point` is set, we must always fail with malformed.
+		if msg.blinding_point.is_some() {
+			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+				channel_id: msg.channel_id,
+				htlc_id: msg.htlc_id,
+				sha256_of_onion: [0; 32],
+				failure_code: INVALID_ONION_BLINDING,
+			});
+		}
+
+		let (err_code, err_data) = if is_intro_node_blinded_forward {
+			(INVALID_ONION_BLINDING, &[0; 32][..])
+		} else {
+			(err_code, &res.0[..])
+		};
+		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+			channel_id: msg.channel_id,
+			htlc_id: msg.htlc_id,
+			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
+				.get_encrypted_failure_packet(shared_secret, &None),
+		})
+	}
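
For orientation (illustrative, not from the patch): the `0x1000 | 20`-style values above are BOLT 4 onion failure codes, built by OR-ing flag bits into a `u16`:

    // BOLT 4 failure-code flag bits (values per the spec).
    const PERM: u16 = 0x4000;   // permanent failure
    const NODE: u16 = 0x2000;   // failure attributed to a node, not a channel
    const UPDATE: u16 = 0x1000; // a channel_update accompanies the error

    fn example_codes() -> [u16; 4] {
        [
            PERM | 10,   // unknown_next_peer, written 0x4000 | 10 above
            UPDATE | 7,  // temporary_channel_failure, 0x1000 | 7
            UPDATE | 20, // channel_disabled, 0x1000 | 20
            NODE | 2,    // temporary_node_failure, 0x2000 | 2
        ]
    }

 
 	fn decode_update_add_htlc_onion(
@@ -3036,48 +3645,7 @@ where
 			msg, &self.node_signer, &self.logger, &self.secp_ctx
 		)?;
 
-		let is_intro_node_forward = match next_hop {
-			onion_utils::Hop::Forward {
-				next_hop_data: msgs::InboundOnionPayload::BlindedForward {
-					intro_node_blinding_point: Some(_), ..
-				}, ..
-			} => true,
-			_ => false,
-		};
-
-		macro_rules!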
return_err { - ($msg: expr, $err_code: expr, $data: expr) => { - { - log_info!( - WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)), - "Failed to accept/forward incoming HTLC: {}", $msg - ); - // If `msg.blinding_point` is set, we must always fail with malformed. - if msg.blinding_point.is_some() { - return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, - })); - } - - let (err_code, err_data) = if is_intro_node_forward { - (INVALID_ONION_BLINDING, &[0; 32][..]) - } else { ($err_code, $data) }; - return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason: HTLCFailReason::reason(err_code, err_data.to_vec()) - .get_encrypted_failure_packet(&shared_secret, &None), - })); - } - } - } - - let NextPacketDetails { - next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value - } = match next_packet_details_opt { + let next_packet_details = match next_packet_details_opt { Some(next_packet_details) => next_packet_details, // it is a receive, so no need for outbound checks None => return Ok((next_hop, shared_secret, None)), @@ -3085,124 +3653,15 @@ where // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we // can't hold the outbound peer state lock at the same time as the inbound peer state lock. - if let Some((err, mut code, chan_update)) = loop { - let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned(); - let forwarding_chan_info_opt = match id_option { - None => { // unknown_next_peer - // Note that this is likely a timing oracle for detecting whether an scid is a - // phantom or an intercept. - if (self.default_configuration.accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash) - { - None - } else { - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - } - }, - Some((cp_id, id)) => Some((cp_id.clone(), id.clone())), - }; - let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); - if peer_state_mutex_opt.is_none() { - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - } - let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); - let peer_state = &mut *peer_state_lock; - let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map( - |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None } - ).flatten() { - None => { - // Channel was removed. The short_to_chan_info and channel_by_id maps - // have no consistency guarantees. - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - }, - Some(chan) => chan - }; - if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { - // Note that the behavior here should be identical to the above block - we - // should NOT reveal the existence or non-existence of a private channel if - // we don't allow forwards outbound over them. 
- break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None)); - } - if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { - // `option_scid_alias` (referred to in LDK as `scid_privacy`) means - // "refuse to forward unless the SCID alias was used", so we pretend - // we don't have the channel here. - break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None)); - } - let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok(); - - // Note that we could technically not return an error yet here and just hope - // that the connection is reestablished or monitor updated by the time we get - // around to doing the actual forward, but better to fail early if we can and - // hopefully an attacker trying to path-trace payments cannot make this occur - // on a small/per-node/per-channel scale. - if !chan.context.is_live() { // channel_disabled - // If the channel_update we're going to return is disabled (i.e. the - // peer has been disabled for some time), return `channel_disabled`, - // otherwise return `temporary_channel_failure`. - if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) { - break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt)); - } else { - break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt)); - } - } - if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); - } - if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) { - break Some((err, code, chan_update_opt)); - } - chan_update_opt - } else { - None - }; - - let cur_height = self.best_block.read().unwrap().height() + 1; - - if let Err((err_msg, code)) = check_incoming_htlc_cltv( - cur_height, outgoing_cltv_value, msg.cltv_expiry - ) { - if code & 0x1000 != 0 && chan_update_opt.is_none() { - // We really should set `incorrect_cltv_expiry` here but as we're not - // forwarding over a real channel we can't generate a channel_update - // for it. Instead we just return a generic temporary_node_failure. 
- break Some((err_msg, 0x2000 | 2, None)) - } - let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None }; - break Some((err_msg, code, chan_update_opt)); - } + self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| { + let (err_msg, err_code, chan_update_opt) = e; + self.htlc_failure_from_update_add_err( + msg, counterparty_node_id, err_msg, err_code, chan_update_opt, + next_hop.is_intro_node_blinded_forward(), &shared_secret + ) + })?; - break None; - } - { - let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2)); - if let Some(chan_update) = chan_update { - if code == 0x1000 | 11 || code == 0x1000 | 12 { - msg.amount_msat.write(&mut res).expect("Writes cannot fail"); - } - else if code == 0x1000 | 13 { - msg.cltv_expiry.write(&mut res).expect("Writes cannot fail"); - } - else if code == 0x1000 | 20 { - // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 - 0u16.write(&mut res).expect("Writes cannot fail"); - } - (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail"); - msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail"); - chan_update.write(&mut res).expect("Writes cannot fail"); - } else if code & 0x1000 == 0x1000 { - // If we're trying to return an error that requires a `channel_update` but - // we're forwarding to a phantom or intercept "channel" (i.e. cannot - // generate an update), just use the generic "temporary_node_failure" - // instead. - code = 0x2000 | 2; - } - return_err!(err, code, &res.0[..]); - } - Ok((next_hop, shared_secret, Some(next_packet_pubkey))) + Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey))) } fn construct_pending_htlc_status<'a>( @@ -3213,7 +3672,7 @@ where macro_rules! return_err { ($msg: expr, $err_code: expr, $data: expr) => { { - let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)); + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); if msg.blinding_point.is_some() { return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( @@ -3237,7 +3696,7 @@ where match decoded_hop { onion_utils::Hop::Receive(next_hop_data) => { // OUR PAYMENT! 
- let current_height: u32 = self.best_block.read().unwrap().height(); + let current_height: u32 = self.best_block.read().unwrap().height; match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat, current_height, self.default_configuration.accept_mpp_keysend) @@ -3282,7 +3741,7 @@ where if chan.context.get_short_channel_id().is_none() { return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}); } - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id()); self.get_channel_update_for_unicast(chan) } @@ -3299,7 +3758,7 @@ where /// [`channel_update`]: msgs::ChannelUpdate /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_unicast(&self, chan: &Channel) -> Result { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id()); let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), @@ -3310,7 +3769,7 @@ where } fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel) -> Result { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id()); let were_node_one = self.our_network_pubkey.serialize()[..] 
< chan.context.get_counterparty_node_id().serialize()[..]; @@ -3349,8 +3808,8 @@ where pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { let _lck = self.total_consistency_lock.read().unwrap(); self.send_payment_along_path(SendAlongPathArgs { - path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, - session_priv_bytes + path, payment_hash, recipient_onion: &recipient_onion, total_value, + cur_height, payment_id, keysend_preimage, session_priv_bytes }) } @@ -3368,7 +3827,7 @@ where &self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height, payment_hash, keysend_preimage, prng_seed ).map_err(|e| { - let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None); + let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash)); log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash); e })?; @@ -3376,14 +3835,14 @@ where let err: Result<(), _> = loop { let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) { None => { - let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None); + let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash)); log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash); return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}) }, Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), }; - let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id)); + let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash)); log_trace!(logger, "Attempting to send payment with payment hash {} along path with next hop {}", payment_hash, path.hops.first().unwrap().short_channel_id); @@ -3400,7 +3859,7 @@ where return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } let funding_txo = chan.context.get_funding_txo().unwrap(); - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash)); let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { path: path.clone(), @@ -3497,7 +3956,7 @@ where /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, @@ -3508,7 +3967,7 @@ where /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on /// `route_params` and 
retry failed payment paths based on `retry_strategy`. pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params, @@ -3519,7 +3978,7 @@ where #[cfg(test)] pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, @@ -3528,7 +3987,7 @@ where #[cfg(test)] pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height) } @@ -3537,8 +3996,37 @@ where self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata); } - pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> { - let best_block_height = self.best_block.read().unwrap().height(); + /// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`. + /// + /// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested + /// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this + /// fails or if the encoded `payment_id` is not recognized. The latter may happen once the + /// payment is no longer tracked because the payment was attempted after: + /// - an invoice for the `payment_id` was already paid, + /// - one full [timer tick] has elapsed since initially requesting the invoice when paying an + /// offer, or + /// - the refund corresponding to the invoice has already expired. + /// + /// To retry the payment, request another invoice using a new `payment_id`. + /// + /// Attempting to pay the same invoice twice while the first payment is still pending will + /// result in a [`Bolt12PaymentError::DuplicateInvoice`]. + /// + /// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] are used to indicate + /// whether or not the payment was successful. 
+ /// + /// [timer tick]: Self::timer_tick_occurred + pub fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice) -> Result<(), Bolt12PaymentError> { + let secp_ctx = &self.secp_ctx; + let expanded_key = &self.inbound_payment_key; + match invoice.verify(expanded_key, secp_ctx) { + Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id), + Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice), + } + } + + fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> { + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments .send_payment_for_bolt12_invoice( @@ -3595,7 +4083,7 @@ where /// /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_spontaneous_payment_with_route( route, payment_preimage, recipient_onion, payment_id, &self.entropy_source, @@ -3610,7 +4098,7 @@ where /// /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), @@ -3622,7 +4110,7 @@ where /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows /// us to easily discern them from real payments. pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, @@ -3684,7 +4172,7 @@ where ProbeSendFailure::RouteNotFound })?; - let mut used_liquidity_map = HashMap::with_capacity(first_hops.len()); + let mut used_liquidity_map = hash_map_with_capacity(first_hops.len()); let mut res = Vec::new(); @@ -3748,7 +4236,7 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. 
-	fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
+	fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, &'static str>>(
 		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
 		mut find_funding_output: FundingOutput,
 	) -> Result<(), APIError> {
@@ -3761,26 +4249,37 @@ where
 		let funding_txo;
 		let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
 			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
-				funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
-				let logger = WithChannelContext::from(&self.logger, &chan.context);
-				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
-					.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
-						let channel_id = chan.context.channel_id();
-						let reason = ClosureReason::ProcessingError { err: msg.clone() };
-						let shutdown_res = chan.context.force_shutdown(false, reason);
-						(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
-					} else { unreachable!(); });
+				macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
+					let counterparty;
+					let err = if let ChannelError::Close((msg, reason)) = $err {
+						let channel_id = $chan.context.channel_id();
+						counterparty = chan.context.get_counterparty_node_id();
+						let shutdown_res = $chan.context.force_shutdown(false, reason);
+						MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
+					} else { unreachable!(); };
+
+					mem::drop(peer_state_lock);
+					mem::drop(per_peer_state);
+					let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
+					Err($api_err)
+				} } }
+				match find_funding_output(&chan, &funding_transaction) {
+					Ok(found_funding_txo) => funding_txo = found_funding_txo,
+					Err(err) => {
+						let chan_err = ChannelError::close(err.to_owned());
+						let api_err = APIError::APIMisuseError { err: err.to_owned() };
+						return close_chan!(chan_err, api_err, chan);
+					},
+				}
+
+				let logger = WithChannelContext::from(&self.logger, &chan.context, None);
+				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
 				match funding_res {
 					Ok(funding_msg) => (chan, funding_msg),
-					Err((chan, err)) => {
-						mem::drop(peer_state_lock);
-						mem::drop(per_peer_state);
-						let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
-						return Err(APIError::ChannelUnavailable {
-							err: "Signer refused to sign the initial commitment transaction".to_owned()
-						});
-					},
+					Err((mut chan, chan_err)) => {
+						let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
+						return close_chan!(chan_err, api_err, chan);
+					}
 				}
 			},
 			Some(phase) => {
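
Illustrative only: a caller-side `find_funding_output` closure satisfying the updated bound, which now reports problems as `&'static str` rather than `APIError` (the `funding_transaction_generated` hunk later in this diff does essentially this, with extra duplicate-output and value checks):

    // Hypothetical, simplified closure: locate the output paying the
    // channel's funding script, or return a &'static str error for the
    // close_chan! machinery to surface.
    let find_funding_output = |chan: &OutboundV1Channel<SP>, tx: &Transaction| {
        let funding_spk = chan.context.get_funding_redeemscript().to_p2wsh();
        tx.output.iter()
            .position(|output| output.script_pubkey == funding_spk)
            .map(|idx| OutPoint { txid: tx.txid(), index: idx as u16 })
            .ok_or("No output matched the expected funding script")
    };

@@ -3885,7 +4384,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut result = Ok(());
-		if !funding_transaction.is_coin_base() {
+		if !funding_transaction.is_coinbase() {
 			for inp in funding_transaction.input.iter() {
 				if inp.witness.is_empty() {
 					result = result.and(Err(APIError::APIMisuseError {
@@ -3900,7 +4399,7 @@ where
 			}));
 		}
 		{
-			let height = self.best_block.read().unwrap().height();
+			let height = self.best_block.read().unwrap().height;
 			// Transactions are evaluated as final by network mempools if their locktime is strictly
 			// lower than the next block height.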
However, the modules constituting our Lightning // node might not have perfect sync about their blockchain views. Thus, if the wallet @@ -3941,25 +4440,24 @@ where is_batch_funding, |chan, tx| { let mut output_index = None; - let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); + let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh(); for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { + if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() { if output_index.is_some() { - return Err(APIError::APIMisuseError { - err: "Multiple outputs matched the expected script and value".to_owned() - }); + return Err("Multiple outputs matched the expected script and value"); } output_index = Some(idx as u16); } } if output_index.is_none() { - return Err(APIError::APIMisuseError { - err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned() - }); + return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); } let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() }; if let Some(funding_batch_state) = funding_batch_state.as_mut() { - funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false)); + // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably + // need to fix this somehow to not rely on using the outpoint for the channel ID if we + // want to support V2 batching here as well. + funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false)); } Ok(outpoint) }) @@ -3983,11 +4481,20 @@ where for (channel_id, counterparty_node_id) in channels_to_remove { per_peer_state.get(&counterparty_node_id) .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) - .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id)) - .map(|mut chan| { + .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state))) + .map(|(mut chan, mut peer_state)| { update_maps_on_chan_removal!(self, &chan.context()); let closure_reason = ClosureReason::ProcessingError { err: e.clone() }; shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason)); + peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id, + data: "Failed to fund channel".to_owned(), + } + }, + }); }); } } @@ -4036,6 +4543,7 @@ where .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { @@ -4052,7 +4560,8 @@ where } if let ChannelPhase::Funded(channel) = channel_phase { if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { 
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
 								node_id: channel.context.get_counterparty_node_id(),
@@ -4152,7 +4661,7 @@ where
 			None => {
 				let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
 					next_hop_channel_id, next_node_id);
-				let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+				let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
 				log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
 				return Err(APIError::ChannelUnavailable {
 					err: error
@@ -4184,6 +4693,7 @@ where
 		let mut per_source_pending_forward = [(
 			payment.prev_short_channel_id,
 			payment.prev_funding_outpoint,
+			payment.prev_channel_id,
 			payment.prev_user_channel_id,
 			vec![(pending_htlc_info, payment.prev_htlc_id)]
 		)];
@@ -4211,6 +4721,7 @@ where
 			short_channel_id: payment.prev_short_channel_id,
 			user_channel_id: Some(payment.prev_user_channel_id),
 			outpoint: payment.prev_funding_outpoint,
+			channel_id: payment.prev_channel_id,
 			htlc_id: payment.prev_htlc_id,
 			incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
 			phantom_shared_secret: None,
@@ -4225,6 +4736,145 @@ where
 		Ok(())
 	}

+	fn process_pending_update_add_htlcs(&self) {
+		let mut decode_update_add_htlcs = new_hash_map();
+		mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
+
+		let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
+			if let Some(outgoing_scid) = outgoing_scid_opt {
+				match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
+					Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
+						HTLCDestination::NextHopChannel {
+							node_id: Some(*outgoing_counterparty_node_id),
+							channel_id: *outgoing_channel_id,
+						},
+					None => HTLCDestination::UnknownNextHop {
+						requested_forward_scid: outgoing_scid,
+					},
+				}
+			} else {
+				HTLCDestination::FailedPayment { payment_hash }
+			}
+		};
+
+		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
+			let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+				let counterparty_node_id = chan.context.get_counterparty_node_id();
+				let channel_id = chan.context.channel_id();
+				let funding_txo = chan.context.get_funding_txo().unwrap();
+				let user_channel_id = chan.context.get_user_id();
+				let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
+				(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
+			});
+			let (
+				incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
+				incoming_user_channel_id, incoming_accept_underpaying_htlcs
+			) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
+				incoming_channel_details
+			} else {
+				// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
+				continue;
+			};
+
+			let mut htlc_forwards = Vec::new();
+			let mut htlc_fails = Vec::new();
+			for update_add_htlc in &update_add_htlcs {
+				let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
+					&update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
+				) {
+					Ok(decoded_onion) => decoded_onion,
+					Err(htlc_fail) => {
+						htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
+						continue;
+					},
+				};
+
+				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
+				let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
+
+				// Process the HTLC on the incoming channel.
+				match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
+					chan.can_accept_incoming_htlc(
+						update_add_htlc, &self.fee_estimator, &logger,
+					)
+				}) {
+					Some(Ok(_)) => {},
+					Some(Err((err, code))) => {
+						let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
+							self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
+								self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
+							}).flatten()
+						} else {
+							None
+						};
+						let htlc_fail = self.htlc_failure_from_update_add_err(
+							&update_add_htlc, &incoming_counterparty_node_id, err, code,
+							outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+						);
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+						continue;
+					},
+					// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
+					None => continue 'outer_loop,
+				}
+
+				// Now process the HTLC on the outgoing channel if it's a forward.
+				if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
+					if let Err((err, code, chan_update_opt)) = self.can_forward_htlc(
+						&update_add_htlc, next_packet_details
+					) {
+						let htlc_fail = self.htlc_failure_from_update_add_err(
+							&update_add_htlc, &incoming_counterparty_node_id, err, code,
+							chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+						);
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+						continue;
+					}
+				}
+
+				match self.construct_pending_htlc_status(
+					&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
+					incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
+				) {
+					PendingHTLCStatus::Forward(htlc_forward) => {
+						htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
+					},
+					PendingHTLCStatus::Fail(htlc_fail) => {
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+					},
+				}
+			}
+
+			// Process all of the forwards and failures for the channel in which the HTLCs were
+			// proposed to as a batch.
+			let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id,
+				incoming_user_channel_id, htlc_forwards.drain(..).collect());
+			self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
+			for (htlc_fail, htlc_destination) in htlc_fails.drain(..)
{ + let failure = match htlc_fail { + HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC { + htlc_id: fail_htlc.htlc_id, + err_packet: fail_htlc.reason, + }, + HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC { + htlc_id: fail_malformed_htlc.htlc_id, + sha256_of_onion: fail_malformed_htlc.sha256_of_onion, + failure_code: fail_malformed_htlc.failure_code, + }, + }; + self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure); + self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed { + prev_channel_id: incoming_channel_id, + failed_next_destination: htlc_destination, + }, None)); + } + } + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -4232,11 +4882,13 @@ where pub fn process_pending_htlc_forwards(&self) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + self.process_pending_update_add_htlcs(); + let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); - let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); + let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { - let mut forward_htlcs = HashMap::new(); + let mut forward_htlcs = new_hash_map(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); for (short_chan_id, mut pending_forwards) in forward_htlcs { @@ -4247,20 +4899,21 @@ where for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, .. } }) => { macro_rules! 
failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { - let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4318,13 +4971,13 @@ where }; match next_hop { onion_utils::Hop::Receive(hop_data) => { - let current_height: u32 = self.best_block.read().unwrap().height(); + let current_height: u32 = self.best_block.read().unwrap().height; match create_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret), false, None, current_height, self.default_configuration.accept_mpp_keysend) { - Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), + Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])), Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, @@ -4365,22 +5018,24 @@ where let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); for forward_info in pending_forwards.drain(..) { let queue_fail_htlc_res = match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { onion_packet, blinded, .. }, skimmed_fee_msat, .. }, }) => { + let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash)); log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4452,32 +5107,36 @@ where 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) 
{ match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, skimmed_fee_msat, .. } }) => { let blinded_failure = routing.blinded_failure(); - let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing { + let (cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret, mut onion_fields) = match routing { PendingHTLCRouting::Receive { - payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, - custom_tlvs, requires_blinded_error: _ + payment_data, payment_metadata, payment_context, + incoming_cltv_expiry, phantom_shared_secret, custom_tlvs, + requires_blinded_error: _ } => { let _legacy_hop_data = Some(payment_data.clone()); let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata, custom_tlvs }; (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, - Some(payment_data), phantom_shared_secret, onion_fields) + Some(payment_data), payment_context, phantom_shared_secret, onion_fields) }, - PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => { + PendingHTLCRouting::ReceiveKeysend { + payment_data, payment_preimage, payment_metadata, + incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _ + } => { let onion_fields = RecipientOnionFields { payment_secret: payment_data.as_ref().map(|data| data.payment_secret), payment_metadata, custom_tlvs, }; (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), - payment_data, None, onion_fields) + payment_data, None, None, onion_fields) }, _ => { panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); @@ -4487,6 +5146,7 @@ where prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4513,11 +5173,12 @@ where debug_assert!(!committed_to_claimable); let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice( - &self.best_block.read().unwrap().height().to_be_bytes(), + &self.best_block.read().unwrap().height.to_be_bytes(), ); failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, user_channel_id: $htlc.prev_hop.user_channel_id, + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: $htlc.prev_hop.htlc_id, incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, @@ -4540,10 +5201,7 @@ where macro_rules! check_total_value { ($purpose: expr) => {{ let mut payment_claimable_generated = false; - let is_keysend = match $purpose { - events::PaymentPurpose::SpontaneousPayment(_) => true, - events::PaymentPurpose::InvoicePayment { .. 
} => false, - }; + let is_keysend = $purpose.is_keysend(); let mut claimable_payments = self.claimable_payments.lock().unwrap(); if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { fail_htlc!(claimable_htlc, payment_hash); @@ -4598,7 +5256,6 @@ where #[allow(unused_assignments)] { committed_to_claimable = true; } - let prev_channel_id = prev_funding_outpoint.to_channel_id(); htlcs.push(claimable_htlc); let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum(); htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat)); @@ -4651,17 +5308,18 @@ where } }; if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { - let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64; + let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64; if (cltv_expiry as u64) < expected_min_expiry_height { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", &payment_hash, cltv_expiry, expected_min_expiry_height); fail_htlc!(claimable_htlc, payment_hash); } } - let purpose = events::PaymentPurpose::InvoicePayment { - payment_preimage: payment_preimage.clone(), - payment_secret: payment_data.payment_secret, - }; + let purpose = events::PaymentPurpose::from_parts( + payment_preimage, + payment_data.payment_secret, + payment_context, + ); check_total_value!(purpose); }, OnionPayload::Spontaneous(preimage) => { @@ -4684,10 +5342,11 @@ where &payment_hash, payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); fail_htlc!(claimable_htlc, payment_hash); } else { - let purpose = events::PaymentPurpose::InvoicePayment { - payment_preimage: inbound_payment.get().payment_preimage, - payment_secret: payment_data.payment_secret, - }; + let purpose = events::PaymentPurpose::from_parts( + inbound_payment.get().payment_preimage, + payment_data.payment_secret, + payment_context, + ); let payment_claimable_generated = check_total_value!(purpose); if payment_claimable_generated { inbound_payment.remove_entry(); @@ -4705,7 +5364,7 @@ where } } - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.pending_events, &self.logger, |args| self.send_payment_along_path(args)); @@ -4742,19 +5401,19 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => { + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. 
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
-				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
 					let mut updated_chan = false;
 					{
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 							let peer_state = &mut *peer_state_lock;
-							match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+							match peer_state.channel_by_id.entry(channel_id) {
 								hash_map::Entry::Occupied(mut chan_phase) => {
 									if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
 										updated_chan = true;
@@ -4804,14 +5463,10 @@ where
	fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
 		if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }

-		let logger = WithChannelContext::from(&self.logger, &chan.context);
+		let logger = WithChannelContext::from(&self.logger, &chan.context, None);

 		// If the feerate has decreased by less than half, don't bother
 		if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
-			if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
-				log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-					chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
-			}
 			return NotifyOption::SkipPersistNoEvents;
 		}
 		if !chan.context.is_live() {
@@ -4901,11 +5556,11 @@ where
 				| {
 					context.maybe_expire_prev_config();
 					if unfunded_context.should_expire_unfunded_channel() {
-						let logger = WithChannelContext::from(&self.logger, context);
+						let logger = WithChannelContext::from(&self.logger, context, None);
 						log_error!(logger,
 							"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
 						update_maps_on_chan_removal!(self, &context);
-						shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
+						shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
 						pending_msg_events.push(MessageSendEvent::HandleError {
 							node_id: counterparty_node_id,
 							action: msgs::ErrorAction::SendErrorMessage {
@@ -4957,7 +5612,8 @@ where
 						if n >= DISABLE_GOSSIP_TICKS {
 							chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
 							if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+								let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+								pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
 									msg: update
 								});
 							}
@@ -4971,7 +5627,8 @@ where
 						if n >= ENABLE_GOSSIP_TICKS {
 							chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
 							if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+								let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+								pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
 									msg: update
 								});
 							}
@@ -4986,7 +5643,7 @@ where
 					chan.context.maybe_expire_prev_config();

 					if chan.should_disconnect_peer_awaiting_response() {
-						let logger = WithChannelContext::from(&self.logger, &chan.context);
+						let
logger = WithChannelContext::from(&self.logger, &chan.context, None); log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}", counterparty_node_id, chan_id); pending_msg_events.push(MessageSendEvent::HandleError { @@ -5010,12 +5667,22 @@ where process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events, counterparty_node_id) }, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(chan) => { + process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context, + pending_msg_events, counterparty_node_id) + }, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(chan) => { + process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context, + pending_msg_events, counterparty_node_id) + }, } }); for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() { if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 { - let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id)); + let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None); log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id); peer_state.pending_msg_events.push( events::MessageSendEvent::HandleError { @@ -5170,7 +5837,7 @@ where FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()), FailureCode::IncorrectOrUnknownPaymentDetails => { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); - htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data) }, FailureCode::InvalidOnionPayload(data) => { @@ -5264,9 +5931,14 @@ where } } + fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { + let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination); + if push_forward_event { self.push_pending_forwards_ev(); } + } + /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. - fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { + fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool { // Ensure that no peer state channel storage lock is held when calling this function. // This ensures that future code doesn't introduce a lock-order requirement for // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling @@ -5284,19 +5956,19 @@ where // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. + let mut push_forward_event; match source { HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. 
} => { - if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, + push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx, - &self.pending_events, &self.logger) - { self.push_pending_forwards_ev(); } + &self.pending_events, &self.logger); }, HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, - ref phantom_shared_secret, ref outpoint, ref blinded_failure, .. + ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, .. }) => { log_trace!( - WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())), + WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)), "Failing {}HTLC with payment_hash {} backwards from us: {:?}", if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error ); @@ -5323,11 +5995,9 @@ where } }; - let mut push_forward_ev = false; + push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty(); let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - if forward_htlcs.is_empty() { - push_forward_ev = true; - } + push_forward_event &= forward_htlcs.is_empty(); match forward_htlcs.entry(*short_channel_id) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(failure); @@ -5337,14 +6007,14 @@ where } } mem::drop(forward_htlcs); - if push_forward_ev { self.push_pending_forwards_ev(); } let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::HTLCHandlingFailed { - prev_channel_id: outpoint.to_channel_id(), + prev_channel_id: *channel_id, failed_next_destination: destination, }, None)); }, } + push_forward_event } /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any @@ -5406,19 +6076,27 @@ where } } - let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); - let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat); - let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash, - ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), - payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value - }); - if dup_purpose.is_some() { - debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); - log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", - &payment_hash); - } + let claiming_payment = claimable_payments.pending_claiming_payments + .entry(payment_hash) + .and_modify(|_| { + debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); + log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", + &payment_hash); + }) + .or_insert_with(|| { + let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); + let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat); + ClaimingPayment { + amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), + payment_purpose: payment.purpose, + receiver_node_id, + htlcs, + sender_intended_value, + onion_fields: payment.onion_fields, + } + }); - if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = payment.onion_fields { + if let Some(RecipientOnionFields { ref custom_tlvs, .. 
}) = claiming_payment.onion_fields { if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) { log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}", &payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0))); @@ -5481,7 +6159,7 @@ where } if valid_mpp { for htlc in sources.drain(..) { - let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id(); + let prev_hop_chan_id = htlc.prev_hop.channel_id; if let Err((pk, err)) = self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_, definitely_duplicate| { @@ -5492,7 +6170,7 @@ where if let msgs::ErrorAction::IgnoreError = err.err.action { // We got a temporary failure updating monitor, but will claim the // HTLC when the monitor updating is restored (or on chain). - let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id)); + let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash)); log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); } else { errs.push((pk, err)); } } @@ -5501,7 +6179,7 @@ where if !valid_mpp { for htlc in sources.drain(..) { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); - htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); let source = HTLCSource::PreviousHopData(htlc.prev_hop); let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); let receiver = HTLCDestination::FailedPayment { payment_hash }; @@ -5534,7 +6212,7 @@ where { let per_peer_state = self.per_peer_state.read().unwrap(); - let chan_id = prev_hop.outpoint.to_channel_id(); + let chan_id = prev_hop.channel_id; let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()), None => None @@ -5551,7 +6229,7 @@ where if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let counterparty_node_id = chan.context.get_counterparty_node_id(); - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger); match fulfill_res { @@ -5572,6 +6250,7 @@ where BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo: prev_hop.outpoint, + channel_id: prev_hop.channel_id, update: monitor_update.clone(), }); } @@ -5586,13 +6265,13 @@ where log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", chan_id, action); - let (node_id, funding_outpoint, blocker) = + let (node_id, _funding_outpoint, channel_id, blocker) = if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, downstream_funding_outpoint: funding_outpoint, - blocking_action: blocker, + blocking_action: blocker, downstream_channel_id: channel_id, } = action { - (node_id, funding_outpoint, blocker) + (node_id, funding_outpoint, channel_id, blocker) } else { debug_assert!(false, "Duplicate claims should always free another channel immediately"); @@ -5602,7 +6281,7 @@ where let mut peer_state = 
peer_state_mtx.lock().unwrap();
 					if let Some(blockers) = peer_state
 						.actions_blocking_raa_monitor_updates
-						.get_mut(&funding_outpoint.to_channel_id())
+						.get_mut(&channel_id)
 					{
 						let mut found_blocker = false;
 						blockers.retain(|iter| {
@@ -5631,6 +6310,7 @@ where
 			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 				payment_preimage,
 			}],
+			channel_id: Some(prev_hop.channel_id),
 		};

 		if !during_init {
@@ -5642,7 +6322,8 @@ where
 				// with a preimage we *must* somehow manage to propagate it to the upstream
 				// channel, or we must have an ability to receive the same event and try
 				// again on restart.
-				log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+				log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
+					"Critical error: failed to update channel monitor with preimage {:?}: {:?}",
 					payment_preimage, update_res);
 			}
 		} else {
@@ -5658,7 +6339,7 @@ where
 			// complete the monitor update completion action from `completion_action`.
 			self.pending_background_events.lock().unwrap().push(
 				BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
-					prev_hop.outpoint, preimage_update,
+					prev_hop.outpoint, prev_hop.channel_id, preimage_update,
 				)));
 		}
 		// Note that we do process the completion action here. This totally could be a
@@ -5675,8 +6356,9 @@ where
 	}

 	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
-		forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
-		next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
 	) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, ..
} => { @@ -5686,7 +6368,7 @@ where debug_assert_eq!(pubkey, path.hops[0].pubkey); } let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint: next_channel_outpoint, + channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id, counterparty_node_id: path.hops[0].pubkey, }; self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, @@ -5694,7 +6376,8 @@ where &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { - let prev_outpoint = hop_data.outpoint; + let prev_channel_id = hop_data.channel_id; + let prev_user_channel_id = hop_data.user_channel_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); #[cfg(debug_assertions)] let claiming_chan_funding_outpoint = hop_data.outpoint; @@ -5702,7 +6385,7 @@ where |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = if let Some(node_id) = next_channel_counterparty_node_id { - Some((node_id, next_channel_outpoint, completed_blocker)) + Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker)) } else { // We can only get `None` here if we are processing a // `ChannelMonitor`-originated event, in which case we @@ -5739,7 +6422,7 @@ where }, // or the channel we'd unblock is already closed, BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup( - (funding_txo, monitor_update) + (funding_txo, _channel_id, monitor_update) ) => { if *funding_txo == next_channel_outpoint { assert_eq!(monitor_update.updates.len(), 1); @@ -5755,7 +6438,7 @@ where BackgroundEvent::MonitorUpdatesComplete { channel_id, .. } => - *channel_id == claiming_chan_funding_outpoint.to_channel_id(), + *channel_id == prev_channel_id, } }), "{:?}", *background_events); } @@ -5765,21 +6448,27 @@ where Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: other_chan.0, downstream_funding_outpoint: other_chan.1, - blocking_action: other_chan.2, + downstream_channel_id: other_chan.2, + blocking_action: other_chan.3, }) } else { None } } else { - let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { if let Some(claimed_htlc_value) = htlc_claim_value_msat { Some(claimed_htlc_value - forwarded_htlc_value) } else { None } } else { None }; + debug_assert!(skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat"); Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { event: events::Event::PaymentForwarded { - fee_earned_msat, + prev_channel_id: Some(prev_channel_id), + next_channel_id: Some(next_channel_id), + prev_user_channel_id, + next_user_channel_id, + total_fee_earned_msat, + skimmed_fee_msat, claim_from_onchain_tx: from_onchain, - prev_channel_id: Some(prev_outpoint.to_channel_id()), - next_channel_id: Some(next_channel_outpoint.to_channel_id()), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, downstream_counterparty_and_funding_outpoint: chan_to_release, @@ -5814,6 +6503,7 @@ where receiver_node_id, htlcs, sender_intended_value: sender_intended_total_msat, + onion_fields, }) = payment { self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed { payment_hash, @@ -5822,6 +6512,7 @@ where receiver_node_id: Some(receiver_node_id), htlcs, sender_intended_total_msat, + onion_fields, }, None)); } }, @@ -5829,16 +6520,17 @@ where event, 
downstream_counterparty_and_funding_outpoint
 				} => {
 					self.pending_events.lock().unwrap().push_back((event, None));
-					if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
-						self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+					if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+						self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
 					}
 				},
 				MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
-					downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+					downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
 				} => {
 					self.handle_monitor_update_release(
 						downstream_counterparty_node_id,
 						downstream_funding_outpoint,
+						downstream_channel_id,
 						Some(blocking_action),
 					);
 				},
@@ -5851,24 +6543,31 @@ where
	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
 		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>, commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
-		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
+		funding_broadcastable: Option<Transaction>,
 		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-	-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
-		let logger = WithChannelContext::from(&self.logger, &channel.context);
-		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+	-> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
+		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
 			&channel.context.channel_id(), if raa.is_some() { "an" } else { "no" },
-			if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+			if commitment_update.is_some() { "a" } else { "no" },
+			pending_forwards.len(), pending_update_adds.len(),
 			if funding_broadcastable.is_some() { "" } else { "not " },
 			if channel_ready.is_some() { "sending" } else { "without" },
 			if announcement_sigs.is_some() { "sending" } else { "without" });

-		let mut htlc_forwards = None;
-		let counterparty_node_id = channel.context.get_counterparty_node_id();
+		let counterparty_node_id = channel.context.get_counterparty_node_id();
+		let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
+
+		let mut htlc_forwards = None;
 		if !pending_forwards.is_empty() {
-			htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
-				channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+			htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(),
+				channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
+		}
+		let mut decode_update_add_htlcs = None;
+		if !pending_update_adds.is_empty() {
+			decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
 		}

 		if let Some(msg) = channel_ready {
@@ -5919,10 +6618,10 @@ where
 			emit_channel_ready_event!(pending_events, channel);
 		}

-		htlc_forwards
+		(htlc_forwards, decode_update_add_htlcs)
 	}

-	fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+	fn channel_monitor_updated(&self, funding_txo:
&OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { + fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock let counterparty_node_id = match counterparty_node_id { @@ -5931,7 +6630,7 @@ where // TODO: Once we can rely on the counterparty_node_id from the // monitor event, this and the outpoint_to_peer map should be removed. let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap(); - match outpoint_to_peer.get(&funding_txo) { + match outpoint_to_peer.get(funding_txo) { Some(cp_id) => cp_id.clone(), None => return, } @@ -5944,11 +6643,11 @@ where peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; let channel = - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) { + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) { chan } else { let update_actions = peer_state.monitor_update_blocked_actions - .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new()); + .remove(&channel_id).unwrap_or(Vec::new()); mem::drop(peer_state_lock); mem::drop(per_peer_state); self.handle_monitor_update_completion_actions(update_actions); @@ -5959,11 +6658,11 @@ where pending.retain(|upd| upd.update_id > highest_applied_update_id); pending.len() } else { 0 }; - let logger = WithChannelContext::from(&self.logger, &channel.context); + let logger = WithChannelContext::from(&self.logger, &channel.context, None); log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.", highest_applied_update_id, channel.context.get_latest_monitor_update_id(), remaining_in_flight); - if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id { + if !channel.is_awaiting_monitor_update() || remaining_in_flight != 0 { return; } handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel); @@ -6013,7 +6712,7 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { - let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id)); + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let peers_without_funded_channels = @@ -6034,73 +6733,82 @@ where // happening and return an error. N.B. that we create channel with an outbound SCID of zero so // that we can delay allocating the SCID until after we're sure that the checks below will // succeed. 
- let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) { + let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) { Some(unaccepted_channel) => { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height, - &self.logger, accept_0conf).map_err(|e| { - let err_str = e.to_string(); - log_error!(logger, "{}", err_str); - - APIError::ChannelUnavailable { err: err_str } - }) - } + &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)) + }, _ => { let err_str = "No such channel awaiting to be accepted.".to_owned(); log_error!(logger, "{}", err_str); - Err(APIError::APIMisuseError { err: err_str }) + return Err(APIError::APIMisuseError { err: err_str }); } - }?; + }; - if accept_0conf { - // This should have been correctly configured by the call to InboundV1Channel::new. - debug_assert!(channel.context.minimum_depth().unwrap() == 0); - } else if channel.context.get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } + match res { + Err(err) => { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) { + Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"), + Err(e) => { + return Err(APIError::ChannelUnavailable { err: e.err }); + }, } - }; - peer_state.pending_msg_events.push(send_msg_err_event); - let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned(); - log_error!(logger, "{}", err_str); + } + Ok(mut channel) => { + if accept_0conf { + // This should have been correctly configured by the call to InboundV1Channel::new. + debug_assert!(channel.context.minimum_depth().unwrap() == 0); + } else if channel.context.get_channel_type().requires_zero_conf() { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.context.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } + } + }; + peer_state.pending_msg_events.push(send_msg_err_event); + let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned(); + log_error!(logger, "{}", err_str); - return Err(APIError::APIMisuseError { err: err_str }); - } else { - // If this peer already has some channels, a new channel won't increase our number of peers - // with unfunded channels, so as long as we aren't over the maximum number of unfunded - // channels per-peer we can accept channels from a peer with existing ones. 
-			if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
-				let send_msg_err_event = events::MessageSendEvent::HandleError {
-					node_id: channel.context.get_counterparty_node_id(),
-					action: msgs::ErrorAction::SendErrorMessage{
-						msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
-					}
-				};
-				peer_state.pending_msg_events.push(send_msg_err_event);
-				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
-				log_error!(logger, "{}", err_str);
+					if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+						let send_msg_err_event = events::MessageSendEvent::HandleError {
+							node_id: channel.context.get_counterparty_node_id(),
+							action: msgs::ErrorAction::SendErrorMessage{
+								msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+							}
+						};
+						peer_state.pending_msg_events.push(send_msg_err_event);
+						let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+						log_error!(logger, "{}", err_str);

-				return Err(APIError::APIMisuseError { err: err_str });
-			}
-		}
+						return Err(APIError::APIMisuseError { err: err_str });
+					}
+				}

-		// Now that we know we have a channel, assign an outbound SCID alias.
-		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
-		channel.context.set_outbound_scid_alias(outbound_scid_alias);
+				// Now that we know we have a channel, assign an outbound SCID alias.
+				let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+				channel.context.set_outbound_scid_alias(outbound_scid_alias);

-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-			node_id: channel.context.get_counterparty_node_id(),
-			msg: channel.accept_inbound_channel(),
-		});
+				peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+					node_id: channel.context.get_counterparty_node_id(),
+					msg: channel.accept_inbound_channel(),
+				});

-		peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+				peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));

-		Ok(())
+				Ok(())
+			},
+		}
 	}

 	/// Gets the number of peers which match the given filter and do not have any funded, outbound,
@@ -6111,7 +6819,7 @@ where
	fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
	where Filter: Fn(&PeerState<SP>) -> bool {
 		let mut peers_without_funded_channels = 0;
-		let best_block_height = self.best_block.read().unwrap().height();
+		let best_block_height = self.best_block.read().unwrap().height;
 		{
 			let peer_state_lock = self.per_peer_state.read().unwrap();
 			for (_, peer_mtx) in peer_state_lock.iter() {
@@ -6146,9 +6854,25 @@ where
 							num_unfunded_channels += 1;
 						}
 					},
+					// TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(chan) => { + // Only inbound V2 channels that are not 0conf and that we do not contribute to will be + // included in the unfunded count. + if chan.context.minimum_depth().unwrap_or(1) != 0 && + chan.dual_funding_context.our_funding_satoshis == 0 { + num_unfunded_channels += 1; + } + }, ChannelPhase::UnfundedOutboundV1(_) => { // Outbound channels don't contribute to the unfunded count in the DoS context. continue; + }, + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(_) => { + // Outbound channels don't contribute to the unfunded count in the DoS context. + continue; } } } @@ -6158,12 +6882,14 @@ where fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are // likely to be lost on restart! - if msg.chain_hash != self.chain_hash { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); + if msg.common_fields.chain_hash != self.chain_hash { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), + msg.common_fields.temporary_channel_id.clone())); } if !self.default_configuration.accept_inbound_channels { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone())); + return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), + msg.common_fields.temporary_channel_id.clone())); } // Get the number of peers with channels, but without funded ones. 
We don't care too much @@ -6176,7 +6902,9 @@ where let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()) + MsgHandleErrInternal::send_err_msg_no_close( + format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), + msg.common_fields.temporary_channel_id.clone()) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6190,34 +6918,36 @@ where { return Err(MsgHandleErrInternal::send_err_msg_no_close( "Have too many peers with unfunded channels, not accepting new ones".to_owned(), - msg.temporary_channel_id.clone())); + msg.common_fields.temporary_channel_id.clone())); } - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER { return Err(MsgHandleErrInternal::send_err_msg_no_close( format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER), - msg.temporary_channel_id.clone())); + msg.common_fields.temporary_channel_id.clone())); } - let channel_id = msg.temporary_channel_id; + let channel_id = msg.common_fields.temporary_channel_id; let channel_exists = peer_state.has_channel(&channel_id); if channel_exists { - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())); + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "temporary_channel_id collision for the same peer!".to_owned(), + msg.common_fields.temporary_channel_id.clone())); } // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept. 
if self.default_configuration.manually_accept_inbound_channels { let channel_type = channel::channel_type_from_open_channel( - &msg, &peer_state.latest_features, &self.channel_type_features() + &msg.common_fields, &peer_state.latest_features, &self.channel_type_features() ).map_err(|e| - MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id) + MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id) )?; let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), + temporary_channel_id: msg.common_fields.temporary_channel_id.clone(), counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, + funding_satoshis: msg.common_fields.funding_satoshis, push_msat: msg.push_msat, channel_type, }, None)); @@ -6237,17 +6967,21 @@ where &self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false) { Err(e) => { - return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)); + return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)); }, Ok(res) => res }; let channel_type = channel.context.get_channel_type(); if channel_type.requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "No zero confirmation channels accepted".to_owned(), + msg.common_fields.temporary_channel_id.clone())); } if channel_type.requires_anchors_zero_fee_htlc_tx() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone())); + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "No channels with anchor outputs accepted".to_owned(), + msg.common_fields.temporary_channel_id.clone())); } let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); @@ -6269,28 +7003,28 @@ where let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id) + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) { hash_map::Entry::Occupied(mut phase) => { match phase.get_mut() { ChannelPhase::UnfundedOutboundV1(chan) => { try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase); - (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id()) + (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id()) }, _ => { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)); + return 
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)); } } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::FundingGenerationReady { - temporary_channel_id: msg.temporary_channel_id, + temporary_channel_id: msg.common_fields.temporary_channel_id, counterparty_node_id: *counterparty_node_id, channel_value_satoshis: value, output_script, @@ -6314,7 +7048,7 @@ where let (mut chan, funding_msg_opt, monitor) = match peer_state.channel_by_id.remove(&msg.temporary_channel_id) { Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => { - let logger = WithChannelContext::from(&self.logger, &inbound_chan.context); + let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { Ok(res) => res, Err((inbound_chan, err)) => { @@ -6331,7 +7065,7 @@ where }, Some(mut phase) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); - let err = ChannelError::Close(err_msg); + let err = ChannelError::close(err_msg); return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) @@ -6346,7 +7080,7 @@ where // `update_maps_on_chan_removal`), we'll remove the existing channel // from `outpoint_to_peer`. Thus, we must first unset the funding outpoint // on the channel. - let err = ChannelError::Close($err.to_owned()); + let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(msg.temporary_channel_id); return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1); } } } @@ -6386,7 +7120,7 @@ where } Ok(()) } else { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated"); fail_chan!("Duplicate funding outpoint"); } @@ -6414,7 +7148,8 @@ where let logger = WithContext::from( &self.logger, Some(chan.context.get_counterparty_node_id()), - Some(chan.context.channel_id()) + Some(chan.context.channel_id()), + None ); let res = chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger); @@ -6430,7 +7165,7 @@ where } else { unreachable!(); } Ok(()) } else { - let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned()); + let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned()); // We weren't able to watch the channel to begin with, so no // updates should be made on it. 
Previously, full_stack_target // found an (unreachable) panic when the monitor update contained @@ -6470,7 +7205,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer, self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry); if let Some(announcement_sigs) = announcement_sigs_opt { @@ -6501,7 +7236,7 @@ where Ok(()) } else { - try_chan_phase_entry!(self, Err(ChannelError::Close( + try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry) } }, @@ -6528,7 +7263,7 @@ where match phase { ChannelPhase::Funded(chan) => { if !chan.received_shutdown() { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.", msg.channel_id, if chan.sent_shutdown() { " after we initiated shutdown" } else { "" }); @@ -6556,11 +7291,19 @@ where }, ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => { let context = phase.context_mut(); - let logger = WithChannelContext::from(&self.logger, context); + let logger = WithChannelContext::from(&self.logger, context, None); log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); let mut chan = remove_channel_phase!(self, chan_phase_entry); finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel)); }, + // TODO(dual_funding): Combine this match arm with above. + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => { + let context = phase.context_mut(); + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); + let mut chan = remove_channel_phase!(self, chan_phase_entry); + finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel)); + }, } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -6608,7 +7351,7 @@ where (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result) } else { (tx, None, shutdown_result) } } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -6617,14 +7360,13 @@ where }; if let Some(broadcast_tx) = tx { let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id()); - log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx)); + log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); } if let Some(ChannelPhase::Funded(chan)) = chan_option { if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -6661,7 +7403,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let pending_forward_info = match decoded_hop_res { + let mut pending_forward_info = match decoded_hop_res { Ok((next_hop, shared_secret, next_packet_pk_opt)) => self.construct_pending_htlc_status( msg, counterparty_node_id, shared_secret, next_hop, @@ -6669,46 +7411,47 @@ where ), Err(e) => PendingHTLCStatus::Fail(e) }; - let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| { + let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash)); + // If the update_add is completely bogus, the call will Err and we will close, + // but if we've sent a shutdown and they haven't acknowledged it yet, we just + // want to reject the new HTLC and fail it backwards instead of forwarding. + if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) { if msg.blinding_point.is_some() { - return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( - msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, - } - )) - } - // If the update_add is completely bogus, the call will Err and we will close, - // but if we've sent a shutdown and they haven't acknowledged it yet, we just - // want to reject the new HTLC and fail it backwards instead of forwarding. - match pending_forward_info { - PendingHTLCStatus::Forward(PendingHTLCInfo { - ref incoming_shared_secret, ref routing, ..
- }) => { - let reason = if routing.blinded_failure().is_some() { - HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]) - } else if (error_code & 0x1000) != 0 { - let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - HTLCFailReason::reason(real_code, error_data) - } else { - HTLCFailReason::from_failure_code(error_code) - }.get_encrypted_failure_packet(incoming_shared_secret, &None); - let msg = msgs::UpdateFailHTLC { + pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( + msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, - reason - }; - PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)) - }, - _ => pending_forward_info + sha256_of_onion: [0; 32], + failure_code: INVALID_ONION_BLINDING, + } + )) + } else { + match pending_forward_info { + PendingHTLCStatus::Forward(PendingHTLCInfo { + ref incoming_shared_secret, ref routing, .. + }) => { + let reason = if routing.blinded_failure().is_some() { + HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]) + } else if (error_code & 0x1000) != 0 { + let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); + HTLCFailReason::reason(real_code, error_data) + } else { + HTLCFailReason::from_failure_code(error_code) + }.get_encrypted_failure_packet(incoming_shared_secret, &None); + let msg = msgs::UpdateFailHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + reason + }; + pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)); + }, + _ => {}, + } } - }; - let logger = WithChannelContext::from(&self.logger, &chan.context); - try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry); + } + try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -6719,7 +7462,8 @@ where fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { let funding_txo; - let (htlc_source, forwarded_htlc_value) = { + let next_user_channel_id; + let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -6733,7 +7477,7 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry); if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", msg.channel_id); @@ -6748,16 +7492,21 @@ where // outbound HTLC is claimed. This is guaranteed to all complete before we // process the RAA as messages are processed from single peers serially. 
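The rework above replaces the `create_pending_htlc_status` closure with straight-line code, but preserves one rule worth calling out: an HTLC that arrived with a `blinding_point` set must be failed with an `update_fail_malformed_htlc` carrying `INVALID_ONION_BLINDING` and an all-zero `sha256_of_onion`, never with the real error. A minimal sketch of that rule with simplified stand-in message types (the real ones live in `crate::ln::msgs`):

```rust
// BADONION | PERM | 24, i.e. BOLT 4's `invalid_onion_blinding` failure code.
const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;

enum FailureMsg {
    // Corresponds to `update_fail_malformed_htlc` on the wire.
    Malformed { failure_code: u16, sha256_of_onion: [u8; 32] },
    // Corresponds to `update_fail_htlc` with an onion-encrypted reason.
    Relay { encrypted_reason: Vec<u8> },
}

fn failure_for_rejected_htlc(blinding_point_set: bool, encrypted_reason: Vec<u8>) -> FailureMsg {
    if blinding_point_set {
        // Inside a blinded path we must not leak why the HTLC was rejected.
        FailureMsg::Malformed { failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32] }
    } else {
        FailureMsg::Relay { encrypted_reason }
    }
}
```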
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded"); + next_user_channel_id = chan.context.get_user_id(); res } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry); } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), + Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id), + funding_txo, msg.channel_id, Some(next_user_channel_id), + ); + Ok(()) } @@ -6777,7 +7526,7 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -6800,13 +7549,13 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if (msg.failure_code & 0x8000) == 0 { - let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); + let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry); } if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry); } Ok(()) @@ -6827,7 +7576,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); let funding_txo = chan.context.get_funding_txo(); let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { @@ -6836,7 +7585,7 @@ where } Ok(()) } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -6844,10 +7593,28 @@ where } } + fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) { + let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty(); + let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); + push_forward_event &= decode_update_add_htlcs.is_empty(); + let scid =
update_add_htlcs.0; + match decode_update_add_htlcs.entry(scid) { + hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); }, + hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); }, + } + if push_forward_event { self.push_pending_forwards_ev(); } + } + + #[inline] + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) { + let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards); + if push_forward_event { self.push_pending_forwards_ev() } + } + #[inline] - fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) { - for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { - let mut push_forward_event = false; + fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool { + let mut push_forward_event = false; + for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut new_intercept_events = VecDeque::new(); let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { @@ -6860,12 +7627,13 @@ where // Pull this now to avoid introducing a lock order with `forward_htlcs`. let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); + let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty(); let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); let forward_htlcs_empty = forward_htlcs.is_empty(); match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })); }, hash_map::Entry::Vacant(entry) => { if !is_our_scid && forward_info.incoming_amt_msat.is_some() && @@ -6883,15 +7651,16 @@ where intercept_id }, None)); entry.insert(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }); }, hash_map::Entry::Occupied(_) => { - let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash)); log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, + channel_id: prev_channel_id, htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -6907,11 +7676,9 @@ where } else { // We don't want to generate a PendingHTLCsForwardable event if only intercepted // payments are being processed. 
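`push_decode_update_add_htlcs` and `forward_htlcs_without_forward_event` above share one wakeup rule: a `PendingHTLCsForwardable`-style event is generated only when the pending queues transition from empty to non-empty, so a batch of HTLCs yields a single event. A minimal sketch of that rule with stand-in payload types:

```rust
use std::collections::HashMap;

// Stand-in payloads; the real queues hold `msgs::UpdateAddHTLC`s and `HTLCForwardInfo`s.
struct ForwardQueues {
    forward_htlcs: HashMap<u64, Vec<u8>>,
    decode_update_add_htlcs: HashMap<u64, Vec<u8>>,
}

impl ForwardQueues {
    /// Returns true iff the caller should generate a single forwardable event,
    /// i.e. both queues were empty before this push.
    fn push_decode(&mut self, scid: u64, mut htlcs: Vec<u8>) -> bool {
        let push_event =
            self.forward_htlcs.is_empty() && self.decode_update_add_htlcs.is_empty();
        self.decode_update_add_htlcs.entry(scid).or_default().append(&mut htlcs);
        push_event
    }
}
```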
- if forward_htlcs_empty { - push_forward_event = true; - } + push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty; entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }))); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }))); } } } @@ -6919,15 +7686,15 @@ where } for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) { - self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); + push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination); } if !new_intercept_events.is_empty() { let mut events = self.pending_events.lock().unwrap(); events.append(&mut new_intercept_events); } - if push_forward_event { self.push_pending_forwards_ev() } } + push_forward_event } fn push_pending_forwards_ev(&self) { @@ -6955,13 +7722,14 @@ where /// the [`ChannelMonitorUpdate`] in question. fn raa_monitor_updates_held(&self, actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>, - channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey + channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey ) -> bool { actions_blocking_raa_monitor_updates - .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false) + .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false) || self.pending_events.lock().unwrap().iter().any(|(_, action)| { action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint, + channel_id, counterparty_node_id, }) }) @@ -6978,7 +7746,7 @@ where if let Some(chan) = peer_state.channel_by_id.get(&channel_id) { return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - chan.context().get_funding_txo().unwrap(), counterparty_node_id); + chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id); } } false @@ -6996,11 +7764,11 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); let funding_txo_opt = chan.context.get_funding_txo(); let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { self.raa_monitor_updates_held( - &peer_state.actions_blocking_raa_monitor_updates, funding_txo, + &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id, *counterparty_node_id) } else { false }; let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, @@ -7013,7 +7781,7 @@ where } htlcs_to_fail } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7036,10 +7804,10 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); try_chan_phase_entry!(self,
chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7066,7 +7834,7 @@ where peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_phase_entry!(self, chan.announcement_signatures( - &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(), + &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height, msg, &self.default_configuration ), chan_phase_entry), // Note that announcement_signatures fails if the channel cannot be announced, @@ -7074,7 +7842,7 @@ where update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()), }); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7116,7 +7884,7 @@ where if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersistNoEvents); } else { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id); let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry); // If nothing changed after applying their update, we don't need to bother @@ -7126,7 +7894,7 @@ where } } } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_update for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7136,7 +7904,6 @@ where } fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> { - let htlc_forwards; let need_lnd_workaround = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -7148,7 +7915,7 @@ where msg.channel_id ) })?; - let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)); + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -7179,15 +7946,17 @@ where } } let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take(); - htlc_forwards = self.handle_channel_resumption( + let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order, - Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + debug_assert!(htlc_forwards.is_none()); + debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); } need_lnd_workaround } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7227,16 +7996,10 @@ where } }; - let mut persist = NotifyOption::SkipPersistHandleEvents; - if
let Some(forwards) = htlc_forwards { - self.forward_htlcs(&mut [forwards][..]); - persist = NotifyOption::DoPersist; - } - if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } - Ok(persist) + Ok(NotifyOption::SkipPersistHandleEvents) } /// Process pending events from the [`chain::Watch`], returning whether any events were processed. @@ -7246,22 +8009,24 @@ where let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { + for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { for monitor_event in monitor_events.drain(..) { match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { - let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash)); if let Some(preimage) = htlc_update.payment_preimage { log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage); - self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint); + self.claim_funds_internal(htlc_update.source, preimage, + htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true, + false, counterparty_node_id, funding_outpoint, channel_id, None); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); - let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; + let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id }; let reason = HTLCFailReason::from_failure_code(0x4000 | 8); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } }, - MonitorEvent::HolderForceClosed(funding_outpoint) => { + MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => { let counterparty_node_id_opt = match counterparty_node_id { Some(cp_id) => Some(cp_id), None => { @@ -7277,18 +8042,24 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { + if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) { if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) { - failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed)); + let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. 
} = monitor_event { + reason + } else { + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } + }; + failed_channels.push(chan.context.force_shutdown(false, reason.clone())); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { - msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }) + msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() }) }, }); } @@ -7296,8 +8067,8 @@ where } } }, - MonitorEvent::Completed { funding_txo, monitor_update_id } => { - self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref()); + MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => { + self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref()); }, } } @@ -7342,7 +8113,7 @@ where let counterparty_node_id = chan.context.get_counterparty_node_id(); let funding_txo = chan.context.get_funding_txo(); let (monitor_opt, holding_cell_failed_htlcs) = - chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context)); + chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None)); if !holding_cell_failed_htlcs.is_empty() { failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id)); } @@ -7449,7 +8220,7 @@ where peer_state.channel_by_id.retain(|channel_id, phase| { match phase { ChannelPhase::Funded(chan) => { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) { Ok((msg_opt, tx_opt, shutdown_result_opt)) => { if let Some(msg) = msg_opt { @@ -7466,7 +8237,8 @@ where // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -7514,14 +8286,14 @@ where // Channel::force_shutdown tries to make us do) as we may still be in initialization, // so we track the update internally and handle it when the user next calls // timer_tick_occurred, guaranteeing we're running normally. 
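Two hunks above also move `BroadcastChannelUpdate` messages out of per-peer queues and into a `ChannelManager`-level `pending_broadcast_messages` list, which `get_and_clear_pending_msg_events` (further down) only drains once at least one peer is connected. A minimal sketch of that pattern, with a stand-in message type:

```rust
use std::sync::Mutex;

// Stand-in for `events::MessageSendEvent::BroadcastChannelUpdate { .. }`.
struct BroadcastMsg(String);

struct Manager {
    pending_broadcast_messages: Mutex<Vec<BroadcastMsg>>,
}

impl Manager {
    fn queue_broadcast(&self, msg: BroadcastMsg) {
        // Callers no longer need a specific peer's lock to queue a broadcast.
        self.pending_broadcast_messages.lock().unwrap().push(msg);
    }

    fn drain_broadcasts(&self, any_peer_connected: bool) -> Vec<BroadcastMsg> {
        // Only hand broadcasts out once there is someone to send them to; until
        // then they stay queued rather than being dropped on the floor.
        if any_peer_connected {
            self.pending_broadcast_messages.lock().unwrap().drain(..).collect()
        } else {
            Vec::new()
        }
    }
}
```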
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() { + if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() { assert_eq!(update.updates.len(), 1); if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] { assert!(should_broadcast); } else { unreachable!(); } self.pending_background_events.lock().unwrap().push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, update + counterparty_node_id, funding_txo, update, channel_id, }); } self.finish_close_channel(failure); @@ -7531,16 +8303,15 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the - /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will - /// not have an expiration unless otherwise set on the builder. + /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's + /// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire. /// /// # Privacy /// - /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer. - /// However, if one is not found, uses a one-hop [`BlindedPath`] with - /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case, - /// the node must be announced, otherwise, there is no way to find a path to the introduction in - /// order to send the [`InvoiceRequest`]. + /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the offer based on the given + /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for + /// privacy implications as well as those of the parameterized [`Router`], which implements + /// [`MessageRouter`]. /// /// Also, uses a derived signing pubkey in the offer for recipient privacy. /// @@ -7553,25 +8324,29 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { /// /// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer. /// - /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - /// /// [`Offer`]: crate::offers::offer::Offer /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest pub fn create_offer_builder( - &$self, description: String + &$self, absolute_expiry: Option<Duration> ) -> Result<$builder, Bolt12SemanticError> { let node_id = $self.get_our_node_id(); let expanded_key = &$self.inbound_payment_key; let entropy = &*$self.entropy_source; let secp_ctx = &$self.secp_ctx; - let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; + let path = $self.create_blinded_path_using_absolute_expiry(absolute_expiry) + .map_err(|_| Bolt12SemanticError::MissingPaths)?; let builder = OfferBuilder::deriving_signing_pubkey( - description, node_id, expanded_key, entropy, secp_ctx + node_id, expanded_key, entropy, secp_ctx ) .chain_hash($self.chain_hash) .path(path); + let builder = match absolute_expiry { + None => builder, + Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry), + }; + Ok(builder.into()) } } } @@ -7599,11 +8374,10 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { /// /// # Privacy /// - /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with - /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case, - /// the node must be announced, otherwise, there is no way to find a path to the introduction in - /// order to send the [`Bolt12Invoice`]. + /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the refund based on the given + /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for + /// privacy implications as well as those of the parameterized [`Router`], which implements + /// [`MessageRouter`]. /// /// Also, uses a derived payer id in the refund for payer privacy. /// @@ -7619,29 +8393,30 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { /// - `amount_msats` is invalid, or /// - the parameterized [`Router`] is unable to create a blinded path for the refund. /// - /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - /// /// [`Refund`]: crate::offers::refund::Refund /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments pub fn create_refund_builder( - &$self, description: String, amount_msats: u64, absolute_expiry: Duration, - payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64> + &$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, + retry_strategy: Retry, max_total_routing_fee_msat: Option<u64> ) -> Result<$builder, Bolt12SemanticError> { let node_id = $self.get_our_node_id(); let expanded_key = &$self.inbound_payment_key; let entropy = &*$self.entropy_source; let secp_ctx = &$self.secp_ctx; - let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; + let path = $self.create_blinded_path_using_absolute_expiry(Some(absolute_expiry)) + .map_err(|_| Bolt12SemanticError::MissingPaths)?; let builder = RefundBuilder::deriving_payer_id( - description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id + node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id )? .chain_hash($self.chain_hash) .absolute_expiry(absolute_expiry) .path(path); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self); + let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry); $self.pending_outbound_payments .add_new_awaiting_invoice( @@ -7701,10 +8476,9 @@ where /// /// # Privacy /// - /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`] - /// as the introduction node and a derived payer id for payer privacy. As such, currently, the - /// node must be announced. Otherwise, there is no way to find a path to the introduction node - /// in order to send the [`Bolt12Invoice`]. + /// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`] + /// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the + /// docs of the parameterized [`Router`], which implements [`MessageRouter`].
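Both builder docs above key the blinded-path choice off `MAX_SHORT_LIVED_RELATIVE_EXPIRY`. A minimal sketch of the classification rule (the constant's value here is an assumption for illustration; the real decision lives in `create_blinded_path_using_absolute_expiry`, shown further down):

```rust
use core::time::Duration;

// Assumed value for illustration; the real constant is defined alongside the
// ChannelManager offers code.
const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);

// `None` means the offer never expires, which is always treated as long-lived.
fn is_short_lived(now: Duration, absolute_expiry: Option<Duration>) -> bool {
    absolute_expiry.unwrap_or(Duration::MAX)
        <= now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY)
}

fn main() {
    let now = Duration::from_secs(1_700_000_000);
    assert!(is_short_lived(now, Some(now + Duration::from_secs(3_600))));
    assert!(!is_short_lived(now, None));
}
```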
/// /// # Limitations /// @@ -7717,6 +8491,7 @@ where /// Errors if: /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link, /// - the provided parameters are invalid for the offer, + /// - the offer is for an unsupported chain, or /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice /// request. /// @@ -7756,6 +8531,8 @@ where let invoice_request = builder.build_and_sign()?; let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let expiration = StaleExpiration::TimerTicks(1); self.pending_outbound_payments .add_new_awaiting_invoice( @@ -7764,14 +8541,7 @@ where .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?; let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); - if offer.paths().is_empty() { - let message = new_pending_onion_message( - OffersMessage::InvoiceRequest(invoice_request), - Destination::Node(offer.signing_pubkey()), - Some(reply_path), - ); - pending_offers_messages.push(message); - } else { + if !offer.paths().is_empty() { // Send as many invoice requests as there are paths in the offer (with an upper bound). // Using only one path could result in a failure if the path no longer exists. But only // one invoice for a given payment id will be paid, even if more than one is received. @@ -7784,6 +8554,16 @@ where ); pending_offers_messages.push(message); } + } else if let Some(signing_pubkey) = offer.signing_pubkey() { + let message = new_pending_onion_message( + OffersMessage::InvoiceRequest(invoice_request), + Destination::Node(signing_pubkey), + Some(reply_path), + ); + pending_offers_messages.push(message); + } else { + debug_assert!(false); + return Err(Bolt12SemanticError::MissingSigningPubkey); } Ok(()) @@ -7794,7 +8574,7 @@ where /// /// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a /// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding - /// [`PaymentPreimage`]. + /// [`PaymentPreimage`]. It is returned purely for informational purposes. /// /// # Limitations /// @@ -7805,11 +8585,15 @@ where /// /// # Errors /// - /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply - /// path for the invoice. + /// Errors if: + /// - the refund is for an unsupported chain, or + /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for + /// the invoice. 
/// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice - pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> { + pub fn request_refund_payment( + &self, refund: &Refund + ) -> Result<Bolt12Invoice, Bolt12SemanticError> { let expanded_key = &self.inbound_payment_key; let entropy = &*self.entropy_source; let secp_ctx = &self.secp_ctx; @@ -7817,9 +8601,18 @@ where let amount_msats = refund.amount_msats(); let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; + if refund.chain() != self.chain_hash { + return Err(Bolt12SemanticError::UnsupportedChain); + } + + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) { Ok((payment_hash, payment_secret)) => { - let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret) + let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {}); + let payment_paths = self.create_blinded_payment_paths( + amount_msats, payment_secret, payment_context + ) .map_err(|_| Bolt12SemanticError::MissingPaths)?; #[cfg(feature = "std")] @@ -7842,7 +8635,7 @@ where let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); if refund.paths().is_empty() { let message = new_pending_onion_message( - OffersMessage::Invoice(invoice), + OffersMessage::Invoice(invoice.clone()), Destination::Node(refund.payer_id()), Some(reply_path), ); @@ -7858,7 +8651,7 @@ where } } - Ok(()) + Ok(invoice) }, Err(()) => Err(Bolt12SemanticError::InvalidAmount), } @@ -7870,10 +8663,9 @@ where /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the /// [`PaymentHash`] and [`PaymentPreimage`] for you. /// - /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which - /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with - /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be - /// passed directly to [`claim_funds`]. + /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event, which + /// will have the [`PaymentClaimable::purpose`] return `Some` for [`PaymentPurpose::preimage`]. That + /// should then be passed directly to [`claim_funds`]. /// /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements. /// @@ -7893,8 +8685,7 @@ where /// [`claim_funds`]: Self::claim_funds /// [`PaymentClaimable`]: events::Event::PaymentClaimable /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose - /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment - /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage + /// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> { @@ -7964,36 +8755,95 @@ where inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key) } + /// Creates a blinded path by delegating to [`MessageRouter`] based on the path's intended + /// lifetime.
+ /// + /// Whether or not the path is compact depends on whether the path is short-lived or long-lived, + /// respectively, based on the given `absolute_expiry` as seconds since the Unix epoch. See + /// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. + fn create_blinded_path_using_absolute_expiry( + &self, absolute_expiry: Option<Duration> + ) -> Result<BlindedPath, ()> { + let now = self.duration_since_epoch(); + let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY); + + if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry { + self.create_compact_blinded_path() + } else { + self.create_blinded_path() + } + } + + pub(super) fn duration_since_epoch(&self) -> Duration { + #[cfg(not(feature = "std"))] + let now = Duration::from_secs( + self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + ); + #[cfg(feature = "std")] + let now = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH"); + + now + } + /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`]. /// /// Errors if the `MessageRouter` errors or returns an empty `Vec`. fn create_blinded_path(&self) -> Result<BlindedPath, ()> { let recipient = self.get_our_node_id(); - let entropy_source = self.entropy_source.deref(); let secp_ctx = &self.secp_ctx; let peers = self.per_peer_state.read().unwrap() .iter() - .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages()) + .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap())) + .filter(|(_, peer)| peer.is_connected) + .filter(|(_, peer)| peer.latest_features.supports_onion_messages()) .map(|(node_id, _)| *node_id) .collect::<Vec<_>>(); self.router - .create_blinded_paths(recipient, peers, entropy_source, secp_ctx) + .create_blinded_paths(recipient, peers, secp_ctx) + .and_then(|paths| paths.into_iter().next().ok_or(())) + } + + /// Creates a blinded path by delegating to [`MessageRouter::create_compact_blinded_paths`]. + /// + /// Errors if the `MessageRouter` errors or returns an empty `Vec`. + fn create_compact_blinded_path(&self) -> Result<BlindedPath, ()> { + let recipient = self.get_our_node_id(); + let secp_ctx = &self.secp_ctx; + + let peers = self.per_peer_state.read().unwrap() + .iter() + .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap())) + .filter(|(_, peer)| peer.is_connected) + .filter(|(_, peer)| peer.latest_features.supports_onion_messages()) + .map(|(node_id, peer)| ForwardNode { + node_id: *node_id, + short_channel_id: peer.channel_by_id + .iter() + .filter(|(_, channel)| channel.context().is_usable()) + .min_by_key(|(_, channel)| channel.context().channel_creation_height) + .and_then(|(_, channel)| channel.context().get_short_channel_id()), + }) + .collect::<Vec<_>>(); + + self.router + .create_compact_blinded_paths(recipient, peers, secp_ctx) .and_then(|paths| paths.into_iter().next().ok_or(())) } /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to /// [`Router::create_blinded_payment_paths`].
fn create_blinded_payment_paths( - &self, amount_msats: u64, payment_secret: PaymentSecret + &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> { - let entropy_source = self.entropy_source.deref(); let secp_ctx = &self.secp_ctx; let first_hops = self.list_usable_channels(); let payee_node_id = self.get_our_node_id(); - let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY + let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY + LATENCY_GRACE_PERIOD_BLOCKS; let payee_tlvs = ReceiveTlvs { payment_secret, @@ -8001,9 +8851,10 @@ where max_cltv_expiry, htlc_minimum_msat: 1, }, + payment_context, }; self.router.create_blinded_payment_paths( - payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx + payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx ) } @@ -8012,7 +8863,7 @@ where /// /// [phantom node payments]: crate::sign::PhantomKeysManager pub fn get_phantom_scid(&self) -> u64 { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source); @@ -8042,7 +8893,7 @@ where /// Note that this method is not guaranteed to return unique values, you may need to call it a few /// times to get a unique scid. pub fn get_intercept_scid(&self) -> u64 { - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source); @@ -8109,9 +8960,12 @@ where /// [`Event`] being handled) completes, this should be called to restore the channel to normal /// operation. It will double-check that nothing *else* is also blocking the same channel from /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly. - fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) { + fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, + channel_funding_outpoint: OutPoint, channel_id: ChannelId, + mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) { + let logger = WithContext::from( - &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()) + &self.logger, Some(counterparty_node_id), Some(channel_id), None ); loop { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -8121,28 +8975,29 @@ where if let Some(blocker) = completed_blocker.take() { // Only do this on the first iteration of the loop. if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates - .get_mut(&channel_funding_outpoint.to_channel_id()) + .get_mut(&channel_id) { blockers.retain(|iter| iter != &blocker); } } if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - channel_funding_outpoint, counterparty_node_id) { + channel_funding_outpoint, channel_id, counterparty_node_id) { // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete.
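`get_phantom_scid` and `get_intercept_scid` above share a retry loop: keep drawing candidate fake SCIDs until one does not collide with a real channel. A minimal sketch with the namespace generator abstracted as a closure (`draw` stands in for `fake_scid::Namespace::get_fake_scid`, and `known_scids` for the `short_to_chan_info` map):

```rust
use std::collections::HashSet;

fn pick_fake_scid(known_scids: &HashSet<u64>, mut draw: impl FnMut() -> u64) -> u64 {
    loop {
        let candidate = draw();
        if !known_scids.contains(&candidate) {
            return candidate;
        }
    }
}

fn main() {
    let known: HashSet<u64> = [42u64].into_iter().collect();
    let mut next = 41u64;
    let scid = pick_fake_scid(&known, || { next += 1; next });
    assert_eq!(scid, 43); // 42 collides with a real channel, so the loop retries
}
```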
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", - &channel_funding_outpoint.to_channel_id()); + &channel_id); break; } - if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) { + if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry( + channel_id) { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", - channel_funding_outpoint.to_channel_id()); + channel_id); handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, peer_state_lck, peer_state, per_peer_state, chan); if further_update_exists { @@ -8152,7 +9007,7 @@ where } } else { log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update", - channel_funding_outpoint.to_channel_id()); + channel_id); } } } @@ -8169,9 +9024,9 @@ where for action in actions { match action { EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint, counterparty_node_id + channel_funding_outpoint, channel_id, counterparty_node_id } => { - self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None); + self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None); } } } @@ -8211,7 +9066,7 @@ where /// will randomly be placed first or last in the returned array. /// /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate` - /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among + /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among /// the `MessageSendEvent`s to the specific peer they were generated under. fn get_and_clear_pending_msg_events(&self) -> Vec { let events = RefCell::new(Vec::new()); @@ -8231,6 +9086,7 @@ where result = NotifyOption::DoPersist; } + let mut is_any_peer_connected = false; let mut pending_events = Vec::new(); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { @@ -8239,6 +9095,15 @@ where if peer_state.pending_msg_events.len() > 0 { pending_events.append(&mut peer_state.pending_msg_events); } + if peer_state.is_connected { + is_any_peer_connected = true + } + } + + // Ensure that we are connected to some peers before getting broadcast messages. 
+ if is_any_peer_connected { + let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap(); + pending_events.append(&mut broadcast_msgs); } if !pending_events.is_empty() { @@ -8286,9 +9151,9 @@ where fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { { let best_block = self.best_block.read().unwrap(); - assert_eq!(best_block.block_hash(), header.prev_blockhash, + assert_eq!(best_block.block_hash, header.prev_blockhash, "Blocks must be connected in chain-order - the connected header must build on the last connected header"); - assert_eq!(best_block.height(), height - 1, + assert_eq!(best_block.height, height - 1, "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height"); } @@ -8303,14 +9168,14 @@ where let new_height = height - 1; { let mut best_block = self.best_block.write().unwrap(); - assert_eq!(best_block.block_hash(), header.block_hash(), + assert_eq!(best_block.block_hash, header.block_hash(), "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header"); - assert_eq!(best_block.height(), height, + assert_eq!(best_block.height, height, "Blocks must be disconnected in chain-order - the disconnected block must have the correct height"); *best_block = BestBlock::new(header.prev_blockhash, new_height) } - self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))); + self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))); } } @@ -8336,13 +9201,13 @@ where let _persistence_guard = PersistenceNotifierGuard::optionally_notify_skipping_background_events( self, || -> NotifyOption { NotifyOption::DoPersist }); - self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)) + self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)) .map(|(a, b)| (a, Vec::new(), b))); - let last_best_block_height = self.best_block.read().unwrap().height(); + let last_best_block_height = self.best_block.read().unwrap().height; if height < last_best_block_height { let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire); - self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))); + self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))); } } @@ -8359,7 +9224,38 @@ where self, || -> NotifyOption { NotifyOption::DoPersist }); *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, 
header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))); + let mut min_anchor_feerate = None; + let mut min_non_anchor_feerate = None; + if self.background_events_processed_since_startup.load(Ordering::Relaxed) { + // If we're past the startup phase, update our feerate cache + let mut last_days_feerates = self.last_days_feerates.lock().unwrap(); + if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS { + last_days_feerates.pop_front(); + } + let anchor_feerate = self.fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee); + let non_anchor_feerate = self.fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee); + last_days_feerates.push_back((anchor_feerate, non_anchor_feerate)); + if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS { + min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied(); + min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied(); + } + } + + self.do_chain_event(Some(height), |channel| { + let logger = WithChannelContext::from(&self.logger, &channel.context, None); + if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + if let Some(feerate) = min_anchor_feerate { + channel.check_for_stale_feerate(&logger, feerate)?; + } + } else { + if let Some(feerate) = min_non_anchor_feerate { + channel.check_for_stale_feerate(&logger, feerate)?; + } + } + channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)) + }); macro_rules! max_time { ($timestamp: expr) => { @@ -8408,7 +9304,7 @@ where self.do_chain_event(None, |channel| { if let Some(funding_txo) = channel.context.get_funding_txo() { if funding_txo.txid == *txid { - channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None)) + channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None)) } else { Ok((None, Vec::new(), None)) } } else { Ok((None, Vec::new(), None)) } }); @@ -8443,10 +9339,14 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; + peer_state.channel_by_id.retain(|_, phase| { match phase { // Retain unfunded channels. ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true, + // TODO(dual_funding): Combine this match arm with above. 
+ #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true, ChannelPhase::Funded(channel) => { let res = f(channel); if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { @@ -8455,7 +9355,7 @@ where timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() })); } - let logger = WithChannelContext::from(&self.logger, &channel.context); + let logger = WithChannelContext::from(&self.logger, &channel.context, None); if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); if channel.context.is_usable() { @@ -8515,7 +9415,8 @@ where let reason_message = format!("{}", reason); failed_channels.push(channel.context.force_shutdown(true, reason)); if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -8567,6 +9468,7 @@ where incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, phantom_shared_secret: None, outpoint: htlc.prev_funding_outpoint, + channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), }); @@ -8578,7 +9480,7 @@ where HTLCFailReason::from_failure_code(0x2000 | 2), HTLCDestination::InvalidForward { requested_forward_scid })); let logger = WithContext::from( - &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id()) + &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) ); log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false @@ -8606,6 +9508,9 @@ where } /// Returns true if this [`ChannelManager`] needs to be persisted. + /// + /// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that + /// indicates this should be checked. 
pub fn get_and_clear_needs_persistence(&self) -> bool { self.needs_persist_flag.swap(false, Ordering::AcqRel) } @@ -8695,7 +9600,7 @@ where fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Dual-funded channels not supported".to_owned(), - msg.temporary_channel_id.clone())), *counterparty_node_id); + msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id); } fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) { @@ -8711,7 +9616,7 @@ where fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Dual-funded channels not supported".to_owned(), - msg.temporary_channel_id.clone())), *counterparty_node_id); + msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id); } fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) { @@ -8746,18 +9651,21 @@ where msg.channel_id.clone())), *counterparty_node_id); } + #[cfg(splicing)] fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported".to_owned(), msg.channel_id.clone())), *counterparty_node_id); } + #[cfg(splicing)] fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported (splice_ack)".to_owned(), msg.channel_id.clone())), *counterparty_node_id); } + #[cfg(splicing)] fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported (splice_locked)".to_owned(), @@ -8888,7 +9796,7 @@ where let mut per_peer_state = self.per_peer_state.write().unwrap(); let remove_peer = { log_debug!( - WithContext::from(&self.logger, Some(*counterparty_node_id), None), + WithContext::from(&self.logger, Some(*counterparty_node_id), None, None), "Marking channels with {} disconnected and generating channel_updates.", log_pubkey!(counterparty_node_id) ); @@ -8899,18 +9807,31 @@ where peer_state.channel_by_id.retain(|_, phase| { let context = match phase { ChannelPhase::Funded(chan) => { - let logger = WithChannelContext::from(&self.logger, &chan.context); + let logger = WithChannelContext::from(&self.logger, &chan.context, None); if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() { // We only retain funded channels that are not shutdown. return true; } &mut chan.context }, - // Unfunded channels will always be removed. - ChannelPhase::UnfundedOutboundV1(chan) => { + // If we get disconnected and haven't yet committed to a funding + // transaction, we can replay the `open_channel` on reconnection, so don't + // bother dropping the channel here. However, if we already committed to + // the funding transaction we don't yet support replaying the funding + // handshake (and bailing if the peer rejects it), so we force-close in + // that case. + ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true, + ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context, + // Unfunded inbound channels will always be removed. 
+ ChannelPhase::UnfundedInboundV1(chan) => { &mut chan.context }, - ChannelPhase::UnfundedInboundV1(chan) => { + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(chan) => { + &mut chan.context + }, + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(chan) => { &mut chan.context }, }; @@ -8961,7 +9882,12 @@ where // Gossip &events::MessageSendEvent::SendChannelAnnouncement { .. } => false, &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + // [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`] + // This check here is to ensure exhaustivity. + &events::MessageSendEvent::BroadcastChannelUpdate { .. } => { + debug_assert!(false, "This event shouldn't have been here"); + false + }, &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::SendChannelUpdate { .. } => false, &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, @@ -8986,7 +9912,7 @@ where } fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> { - let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None); + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None); if !init_msg.features.supports_static_remote_key() { log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); return Err(()); @@ -9011,8 +9937,8 @@ where return NotifyOption::SkipPersistNoEvents; } e.insert(Mutex::new(PeerState { - channel_by_id: HashMap::new(), - inbound_channel_request_by_id: HashMap::new(), + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -9025,7 +9951,7 @@ where let mut peer_state = e.get().lock().unwrap(); peer_state.latest_features = init_msg.features.clone(); - let best_block_height = self.best_block.read().unwrap().height(); + let best_block_height = self.best_block.read().unwrap().height; if inbound_peer_limited && Self::unfunded_channel_count(&*peer_state, best_block_height) == peer_state.channel_by_id.len() @@ -9048,15 +9974,49 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)| - if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).for_each(|chan| { - let logger = WithChannelContext::from(&self.logger, &chan.context); - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.context.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&&logger), - }); - }); + for (_, phase) in peer_state.channel_by_id.iter_mut() { + match phase { + ChannelPhase::Funded(chan) => { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_channel_reestablish(&&logger), + }); + } + + ChannelPhase::UnfundedOutboundV1(chan) => { + pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_open_channel(self.chain_hash), + }); + } + + // TODO(dual_funding): Combine this match arm with above once 
#[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedOutboundV2(chan) => { + pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_open_channel_v2(self.chain_hash), + }); + }, + + ChannelPhase::UnfundedInboundV1(_) => { + // Since unfunded inbound channel maps are cleared upon disconnecting a peer, + // they are not persisted and won't be recovered after a crash. + // Therefore, they shouldn't exist at this point. + debug_assert!(false); + } + + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] + ChannelPhase::UnfundedInboundV2(channel) => { + // Since unfunded inbound channel maps are cleared upon disconnecting a peer, + // they are not persisted and won't be recovered after a crash. + // Therefore, they shouldn't exist at this point. + debug_assert!(false); + }, + } + } } return NotifyOption::SkipPersistHandleEvents; @@ -9066,8 +10026,6 @@ where } fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - match &msg.data as &str { "cannot co-op close channel w/ active htlcs"| "link failed to shutdown" => @@ -9080,34 +10038,45 @@ where // We're not going to bother handling this in a sensible way, instead simply // repeating the Shutdown message on repeat until morale improves. if !msg.channel_id.is_zero() { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); - if peer_state_mutex_opt.is_none() { return; } - let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) { - if let Some(msg) = chan.get_outbound_shutdown() { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } - peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: *counterparty_node_id, - action: msgs::ErrorAction::SendWarningMessage { - msg: msgs::WarningMessage { - channel_id: msg.channel_id, - data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned() - }, - log_level: Level::Trace, + PersistenceNotifierGuard::optionally_notify( + self, + || -> NotifyOption { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; } + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) { + if let Some(msg) = chan.get_outbound_shutdown() { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } + peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: *counterparty_node_id, + action: msgs::ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id: msg.channel_id, + data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned() + }, + log_level: Level::Trace, + } + }); + // This can happen in a fairly tight 
loop, so we absolutely cannot trigger
+							// a `ChannelManager` write here.
+							return NotifyOption::SkipPersistHandleEvents;
 						}
-					});
-				}
+						NotifyOption::SkipPersistNoEvents
+					}
+				);
 			}
 			return;
 		}
 		_ => {}
 	}
+	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
 	if msg.channel_id.is_zero() {
 		let channel_ids: Vec<ChannelId> = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -9132,14 +10101,29 @@ where
 			if peer_state_mutex_opt.is_none() { return; }
 			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
-				if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
-					peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-						node_id: *counterparty_node_id,
-						msg,
-					});
-					return;
-				}
+			match peer_state.channel_by_id.get_mut(&msg.channel_id) {
+				Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
+					if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+							node_id: *counterparty_node_id,
+							msg,
+						});
+						return;
+					}
+				},
+				#[cfg(any(dual_funding, splicing))]
+				Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
+					if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+							node_id: *counterparty_node_id,
+							msg,
+						});
+						return;
+					}
+				},
+				None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
+				#[cfg(any(dual_funding, splicing))]
+				Some(ChannelPhase::UnfundedInboundV2(_)) => (),
 			}
 		}
@@ -9227,23 +10211,27 @@ where
 	R::Target: Router,
 	L::Target: Logger,
 {
-	fn handle_message(&self, message: OffersMessage) -> Option<OffersMessage> {
+	fn handle_message(&self, message: OffersMessage, responder: Option<Responder>) -> ResponseInstruction<OffersMessage> {
 		let secp_ctx = &self.secp_ctx;
 		let expanded_key = &self.inbound_payment_key;
 		match message {
 			OffersMessage::InvoiceRequest(invoice_request) => {
+				let responder = match responder {
+					Some(responder) => responder,
+					None => return ResponseInstruction::NoResponse,
+				};
 				let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
 					&invoice_request
 				) {
 					Ok(amount_msats) => amount_msats,
-					Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
+					Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())),
 				};
 				let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
 					Ok(invoice_request) => invoice_request,
 					Err(()) => {
 						let error = Bolt12SemanticError::InvalidMetadata;
-						return Some(OffersMessage::InvoiceError(error.into()));
+						return responder.respond(OffersMessage::InvoiceError(error.into()));
 					},
 				};
@@ -9254,17 +10242,21 @@ where
 					Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
 					Err(()) => {
 						let error = Bolt12SemanticError::InvalidAmount;
-						return Some(OffersMessage::InvoiceError(error.into()));
+						return responder.respond(OffersMessage::InvoiceError(error.into()));
 					},
 				};
+				let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+					offer_id: invoice_request.offer_id,
+					invoice_request: invoice_request.fields(),
+				});
 				let payment_paths = match self.create_blinded_payment_paths(
-					amount_msats, payment_secret
+					amount_msats, payment_secret, payment_context
 				) {
 					Ok(payment_paths) => payment_paths,
 					Err(()) => {
 						let error = Bolt12SemanticError::MissingPaths;
-						return Some(OffersMessage::InvoiceError(error.into()));
+						return responder.respond(OffersMessage::InvoiceError(error.into()));
 					},
 				};
@@ -9273,7 +10265,7 @@ where
 					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
 				);
-				if invoice_request.keys.is_some() {
+				let response = if invoice_request.keys.is_some() {
 					#[cfg(feature = "std")]
 					let builder = invoice_request.respond_using_derived_keys(
 						payment_paths, payment_hash
 					);
@@ -9282,12 +10274,10 @@ where
 					let builder = invoice_request.respond_using_derived_keys_no_std(
 						payment_paths, payment_hash, created_at
 					);
-					let builder: Result<InvoiceBuilder<DerivedSigningPubkey>, _> =
-						builder.map(|b| b.into());
-					match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
-						Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
-						Err(error) => Some(OffersMessage::InvoiceError(error.into())),
-					}
+					builder
+						.map(InvoiceBuilder::<DerivedSigningPubkey>::from)
+						.and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
+						.map_err(InvoiceError::from)
 				} else {
 					#[cfg(feature = "std")]
 					let builder = invoice_request.respond_with(payment_paths, payment_hash);
@@ -9295,50 +10285,72 @@ where
 					let builder = invoice_request.respond_with_no_std(
 						payment_paths, payment_hash, created_at
 					);
-					let builder: Result<InvoiceBuilder<ExplicitSigningPubkey>, _> =
-						builder.map(|b| b.into());
-					let response = builder.and_then(|builder| builder.allow_mpp().build())
-						.map_err(|e| OffersMessage::InvoiceError(e.into()))
+					builder
+						.map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
+						.and_then(|builder| builder.allow_mpp().build())
+						.map_err(InvoiceError::from)
 						.and_then(|invoice| {
 							#[cfg(c_bindings)]
 							let mut invoice = invoice;
-							match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
-								Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
-								Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
-									InvoiceError::from_string("Failed signing invoice".to_string())
-								)),
-								Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
-									InvoiceError::from_string("Failed invoice signature verification".to_string())
-								)),
-							}
-						});
-					match response {
-						Ok(invoice) => Some(invoice),
-						Err(error) => Some(error),
-					}
+							invoice
+								.sign(|invoice: &UnsignedBolt12Invoice|
+									self.node_signer.sign_bolt12_invoice(invoice)
+								)
+								.map_err(InvoiceError::from)
+						})
+				};
+
+				match response {
+					Ok(invoice) => responder.respond(OffersMessage::Invoice(invoice)),
+					Err(error) => responder.respond(OffersMessage::InvoiceError(error.into())),
 				}
 			},
 			OffersMessage::Invoice(invoice) => {
-				match invoice.verify(expanded_key, secp_ctx) {
-					Err(()) => {
-						Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
-					},
-					Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
-						Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
-					},
+				let result = match invoice.verify(expanded_key, secp_ctx) {
 					Ok(payment_id) => {
-						if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
-							log_trace!(self.logger, "Failed paying invoice: {:?}", e);
-							Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
+						let features = self.bolt12_invoice_features();
+						if invoice.invoice_features().requires_unknown_bits_from(&features) {
+							Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
+						} else if self.default_configuration.manually_handle_bolt12_invoices {
+							let event = Event::InvoiceReceived { payment_id, invoice, responder };
+							self.pending_events.lock().unwrap().push_back((event, None));
+							return ResponseInstruction::NoResponse;
 						} else {
-							None
+							self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id)
+								.map_err(|e| {
+									log_trace!(self.logger, "Failed paying invoice: {:?}", e);
+									InvoiceError::from_string(format!("{:?}", e))
+								})
 						}
 					},
+					Err(()) => Err(InvoiceError::from_string("Unrecognized invoice".to_owned())),
 				};
+
+				match result {
+					Ok(()) => ResponseInstruction::NoResponse,
+					Err(e) => match responder {
+						Some(responder) => responder.respond(OffersMessage::InvoiceError(e)),
+						None => {
+							log_trace!(self.logger, "No reply path for sending invoice error: {:?}", e);
+							ResponseInstruction::NoResponse
+						},
+					},
+				}
+			},
+			#[cfg(async_payments)]
+			OffersMessage::StaticInvoice(_invoice) => {
+				match responder {
+					Some(responder) => {
+						responder.respond(OffersMessage::InvoiceError(
+							InvoiceError::from_string("Static invoices not yet supported".to_string())
+						))
+					},
+					None => return ResponseInstruction::NoResponse,
 				}
 			},
 			OffersMessage::InvoiceError(invoice_error) => {
 				log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
-				None
+				ResponseInstruction::NoResponse
 			},
 		}
 	}
@@ -9348,6 +10360,23 @@ where
 	}
 }

+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+	T::Target: BroadcasterInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
+	F::Target: FeeEstimator,
+	R::Target: Router,
+	L::Target: Logger,
+{
+	fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
+		self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
+	}
+}
+
 /// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
 /// [`ChannelManager`].
 pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
@@ -9412,134 +10441,6 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;

-impl_writeable_tlv_based!(CounterpartyForwardingInfo, {
-	(2, fee_base_msat, required),
-	(4, fee_proportional_millionths, required),
-	(6, cltv_expiry_delta, required),
-});
-
-impl_writeable_tlv_based!(ChannelCounterparty, {
-	(2, node_id, required),
-	(4, features, required),
-	(6, unspendable_punishment_reserve, required),
-	(8, forwarding_info, option),
-	(9, outbound_htlc_minimum_msat, option),
-	(11, outbound_htlc_maximum_msat, option),
-});
-
-impl Writeable for ChannelDetails {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-		// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
-		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
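// The split-u128 scheme described in the comment above, as a standalone sketch
// (the TLV plumbing is omitted, and these helper names are hypothetical): the
// low half keeps its pre-0.0.113 TLV type (14, required) so old readers still
// parse it, while the high half rides in an odd, optional TLV (37) that old
// readers simply ignore.
fn split_user_channel_id(user_channel_id: u128) -> (u64, Option<u64>) {
	(user_channel_id as u64, Some((user_channel_id >> 64) as u64))
}

fn join_user_channel_id(low: u64, high: Option<u64>) -> u128 {
	// Matches the reconstruction performed in the Readable impl.
	low as u128 + ((high.unwrap_or(0) as u128) << 64)
}

#[test]
fn user_channel_id_round_trips() {
	let id = 0x0123_4567_89ab_cdef_0011_2233_4455_6677_u128;
	let (low, high) = split_user_channel_id(id);
	assert_eq!(join_user_channel_id(low, high), id);
}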
- let user_channel_id_low = self.user_channel_id as u64; - let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64); - write_tlv_fields!(writer, { - (1, self.inbound_scid_alias, option), - (2, self.channel_id, required), - (3, self.channel_type, option), - (4, self.counterparty, required), - (5, self.outbound_scid_alias, option), - (6, self.funding_txo, option), - (7, self.config, option), - (8, self.short_channel_id, option), - (9, self.confirmations, option), - (10, self.channel_value_satoshis, required), - (12, self.unspendable_punishment_reserve, option), - (14, user_channel_id_low, required), - (16, self.balance_msat, required), - (18, self.outbound_capacity_msat, required), - (19, self.next_outbound_htlc_limit_msat, required), - (20, self.inbound_capacity_msat, required), - (21, self.next_outbound_htlc_minimum_msat, required), - (22, self.confirmations_required, option), - (24, self.force_close_spend_delay, option), - (26, self.is_outbound, required), - (28, self.is_channel_ready, required), - (30, self.is_usable, required), - (32, self.is_public, required), - (33, self.inbound_htlc_minimum_msat, option), - (35, self.inbound_htlc_maximum_msat, option), - (37, user_channel_id_high_opt, option), - (39, self.feerate_sat_per_1000_weight, option), - (41, self.channel_shutdown_state, option), - }); - Ok(()) - } -} - -impl Readable for ChannelDetails { - fn read(reader: &mut R) -> Result { - _init_and_read_len_prefixed_tlv_fields!(reader, { - (1, inbound_scid_alias, option), - (2, channel_id, required), - (3, channel_type, option), - (4, counterparty, required), - (5, outbound_scid_alias, option), - (6, funding_txo, option), - (7, config, option), - (8, short_channel_id, option), - (9, confirmations, option), - (10, channel_value_satoshis, required), - (12, unspendable_punishment_reserve, option), - (14, user_channel_id_low, required), - (16, balance_msat, required), - (18, outbound_capacity_msat, required), - // Note that by the time we get past the required read above, outbound_capacity_msat will be - // filled in, so we can safely unwrap it here. - (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), - (20, inbound_capacity_msat, required), - (21, next_outbound_htlc_minimum_msat, (default_value, 0)), - (22, confirmations_required, option), - (24, force_close_spend_delay, option), - (26, is_outbound, required), - (28, is_channel_ready, required), - (30, is_usable, required), - (32, is_public, required), - (33, inbound_htlc_minimum_msat, option), - (35, inbound_htlc_maximum_msat, option), - (37, user_channel_id_high_opt, option), - (39, feerate_sat_per_1000_weight, option), - (41, channel_shutdown_state, option), - }); - - // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with - // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. 
- let user_channel_id_low: u64 = user_channel_id_low.0.unwrap(); - let user_channel_id = user_channel_id_low as u128 + - ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64); - - Ok(Self { - inbound_scid_alias, - channel_id: channel_id.0.unwrap(), - channel_type, - counterparty: counterparty.0.unwrap(), - outbound_scid_alias, - funding_txo, - config, - short_channel_id, - channel_value_satoshis: channel_value_satoshis.0.unwrap(), - unspendable_punishment_reserve, - user_channel_id, - balance_msat: balance_msat.0.unwrap(), - outbound_capacity_msat: outbound_capacity_msat.0.unwrap(), - next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(), - next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(), - inbound_capacity_msat: inbound_capacity_msat.0.unwrap(), - confirmations_required, - confirmations, - force_close_spend_delay, - is_outbound: is_outbound.0.unwrap(), - is_channel_ready: is_channel_ready.0.unwrap(), - is_usable: is_usable.0.unwrap(), - is_public: is_public.0.unwrap(), - inbound_htlc_minimum_msat, - inbound_htlc_maximum_msat, - feerate_sat_per_1000_weight, - channel_shutdown_state, - }) - } -} - impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), (4, phantom_scid, required), @@ -9564,9 +10465,11 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (3, payment_metadata, option), (5, custom_tlvs, optional_vec), (7, requires_blinded_error, (default_value, false)), + (9, payment_context, option), }, (2, ReceiveKeysend) => { (0, payment_preimage, required), + (1, requires_blinded_error, (default_value, false)), (2, incoming_cltv_expiry, required), (3, payment_metadata, option), (4, payment_data, option), // Added in 0.0.116 @@ -9670,12 +10573,17 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (4, htlc_id, required), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), + // Note that by the time we get past the required read for type 2 above, outpoint will be + // filled in, so we can safely unwrap it here. + (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))), }); impl Writeable for ClaimableHTLC { fn write(&self, writer: &mut W) -> Result<(), io::Error> { let (payment_data, keysend_preimage) = match &self.onion_payload { - OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None), + OnionPayload::Invoice { _legacy_hop_data } => { + (_legacy_hop_data.as_ref(), None) + }, OnionPayload::Spontaneous(preimage) => (None, Some(preimage)), }; write_tlv_fields!(writer, { @@ -9821,6 +10729,9 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, { (2, prev_short_channel_id, required), (4, prev_htlc_id, required), (6, prev_funding_outpoint, required), + // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be + // filled in, so we can safely unwrap it here. 
+ (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))), }); impl Writeable for HTLCForwardInfo { @@ -9913,13 +10824,14 @@ where self.chain_hash.write(writer)?; { let best_block = self.best_block.read().unwrap(); - best_block.height().write(writer)?; - best_block.block_hash().write(writer)?; + best_block.height.write(writer)?; + best_block.block_hash.write(writer)?; } + let per_peer_state = self.per_peer_state.write().unwrap(); + let mut serializable_peer_count: u64 = 0; { - let per_peer_state = self.per_peer_state.read().unwrap(); let mut number_of_funded_channels = 0; for (_, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -9960,7 +10872,11 @@ where } } - let per_peer_state = self.per_peer_state.write().unwrap(); + let mut decode_update_add_htlcs_opt = None; + let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); + if !decode_update_add_htlcs.is_empty() { + decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); + } let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); let claimable_payments = self.claimable_payments.lock().unwrap(); @@ -10064,7 +10980,7 @@ where } // Encode without retry info for 0.0.101 compatibility. - let mut pending_outbound_payments_no_retry: HashMap> = HashMap::new(); + let mut pending_outbound_payments_no_retry: HashMap> = new_hash_map(); for (id, outbound) in pending_outbound_payments.iter() { match outbound { PendingOutboundPayment::Legacy { session_privs } | @@ -10092,7 +11008,7 @@ where for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() { if !updates.is_empty() { - if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); } + if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); } in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates); } } @@ -10111,6 +11027,7 @@ where (10, in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), + (14, decode_update_add_htlcs_opt, option), }); Ok(()) @@ -10158,14 +11075,6 @@ impl Readable for VecDeque<(Event, Option)> { } } -impl_writeable_tlv_based_enum!(ChannelShutdownState, - (0, NotShuttingDown) => {}, - (2, ShutdownInitiated) => {}, - (4, ResolvingHTLCs) => {}, - (6, NegotiatingClosingFee) => {}, - (8, ShutdownComplete) => {}, ; -); - /// Arguments for the creation of a ChannelManager that are not deserialized. 
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
@@ -10281,7 +11190,9 @@ where
 			mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
 		Self {
 			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
-			channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
+			channel_monitors: hash_map_from_iter(
+				channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
+			),
 		}
 	}
 }
@@ -10328,18 +11239,20 @@ where
 		let mut failed_htlcs = Vec::new();

 		let channel_count: u64 = Readable::read(reader)?;
-		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+		let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+		let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+		let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = VecDeque::new();
 		let mut close_background_events = Vec::new();
+		let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
 		for _ in 0..channel_count {
 			let mut channel: Channel<SP> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
 			))?;
-			let logger = WithChannelContext::from(&args.logger, &channel.context);
+			let logger = WithChannelContext::from(&args.logger, &channel.context, None);
 			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+			funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
 				if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
@@ -10369,9 +11282,9 @@ where
 					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 						return Err(DecodeError::InvalidValue);
 					}
-					if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+					if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
 						close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-							counterparty_node_id, funding_txo, update
+							counterparty_node_id, funding_txo, channel_id, update
 						});
 					}
 					failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
@@ -10396,6 +11309,7 @@ where
 						// claim update ChannelMonitor updates were persisted prior to persisting
 						// the ChannelMonitor update for the forward leg, so attempting to fail the
 						// backwards leg of the HTLC will simply be rejected.
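// A simplified model, not from this patch, of the freshness check performed
// during deserialization: a channel may trail its ChannelMonitor (missing
// updates are replayed), but if the channel is *ahead* of the monitor plus all
// in-flight updates, the persisted state is dangerous and loading must abort,
// mirroring the DecodeError::DangerousValue path in the hunks below.
fn monitor_is_fresh_enough(
	chan_latest_update_id: u64, monitor_update_id: u64, max_in_flight_update_id: u64,
) -> bool {
	chan_latest_update_id <= monitor_update_id.max(max_in_flight_update_id)
}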
+ let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash)); log_info!(logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", &channel.context.channel_id(), &payment_hash); @@ -10403,9 +11317,10 @@ where } } } else { - log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", + channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id()); + log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), - monitor.get_latest_update_id()); + monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } @@ -10418,7 +11333,7 @@ where by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); }, hash_map::Entry::Vacant(entry) => { - let mut by_id_map = HashMap::new(); + let mut by_id_map = new_hash_map(); by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); entry.insert(by_id_map); } @@ -10449,21 +11364,23 @@ where for (funding_txo, monitor) in args.channel_monitors.iter() { if !funding_txo_set.contains(funding_txo) { - let logger = WithChannelMonitor::from(&args.logger, monitor); + let logger = WithChannelMonitor::from(&args.logger, monitor, None); + let channel_id = monitor.channel_id(); log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed", - &funding_txo.to_channel_id()); + &channel_id); let monitor_update = ChannelMonitorUpdate { update_id: CLOSED_CHANNEL_UPDATE_ID, counterparty_node_id: None, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], + channel_id: Some(monitor.channel_id()), }; - close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update))); + close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update))); } } const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; @@ -10489,7 +11406,7 @@ where let peer_state_from_chans = |channel_by_id| { PeerState { channel_by_id, - inbound_channel_request_by_id: HashMap::new(), + inbound_channel_request_by_id: new_hash_map(), latest_features: InitFeatures::empty(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -10500,10 +11417,10 @@ where }; let peer_count: u64 = Readable::read(reader)?; - let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); + let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); for _ in 0..peer_count { let peer_pubkey = Readable::read(reader)?; - let peer_chans = 
funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()); + let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map()); let mut peer_state = peer_state_from_chans(peer_chans); peer_state.latest_features = Readable::read(reader)?; per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); @@ -10537,7 +11454,7 @@ where let highest_seen_timestamp: u32 = Readable::read(reader)?; let pending_inbound_payment_count: u64 = Readable::read(reader)?; - let mut pending_inbound_payments: HashMap = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32))); + let mut pending_inbound_payments: HashMap = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32))); for _ in 0..pending_inbound_payment_count { if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() { return Err(DecodeError::InvalidValue); @@ -10546,11 +11463,11 @@ where let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; let mut pending_outbound_payments_compat: HashMap = - HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; let payment = PendingOutboundPayment::Legacy { - session_privs: [session_priv].iter().cloned().collect() + session_privs: hash_set_from_iter([session_priv]), }; if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { return Err(DecodeError::InvalidValue) @@ -10560,16 +11477,17 @@ where // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
let mut pending_outbound_payments_no_retry: Option>> = None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = Some(HashMap::new()); + let mut pending_intercepted_htlcs: Option> = Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; let mut claimable_htlc_onion_fields = None; - let mut pending_claiming_payments = Some(HashMap::new()); + let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; let mut in_flight_monitor_updates: Option>> = None; + let mut decode_update_add_htlcs: Option>> = None; read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), (2, pending_intercepted_htlcs, option), @@ -10583,7 +11501,9 @@ where (10, in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), + (14, decode_update_add_htlcs, option), }); + let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -10603,7 +11523,7 @@ where if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { pending_outbound_payments = Some(pending_outbound_payments_compat); } else if pending_outbound_payments.is_none() { - let mut outbounds = HashMap::new(); + let mut outbounds = new_hash_map(); for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); } @@ -10634,12 +11554,13 @@ where $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); for update in $chan_in_flight_upds.iter() { log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", - update.update_id, $channel_info_log, &$funding_txo.to_channel_id()); + update.update_id, $channel_info_log, &$monitor.channel_id()); max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, funding_txo: $funding_txo, + channel_id: $monitor.channel_id(), update: update.clone(), }); } @@ -10650,7 +11571,7 @@ where pending_background_events.push( BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id: $counterparty_node_id, - channel_id: $funding_txo.to_channel_id(), + channel_id: $monitor.channel_id(), }); } if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { @@ -10666,7 +11587,7 @@ where let peer_state = &mut *peer_state_lock; for phase in peer_state.channel_by_id.values() { if let ChannelPhase::Funded(chan) = phase { - let logger = WithChannelContext::from(&args.logger, &chan.context); + let logger = WithChannelContext::from(&args.logger, &chan.context, None); // Channels that were persisted have to be funded, otherwise they should have been // discarded. @@ -10682,7 +11603,7 @@ where } } if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { - // If the channel is ahead of the monitor, return InvalidValue: + // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!");
					log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
						chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
@@ -10691,7 +11612,7 @@ where
					log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
					log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
					log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
-					return Err(DecodeError::InvalidValue);
+					return Err(DecodeError::DangerousValue);
				}
			} else {
				// We shouldn't have persisted (or read) any unfunded channel types so none should have been
@@ -10704,25 +11625,27 @@ where

		if let Some(in_flight_upds) = in_flight_monitor_updates {
			for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
-				let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+				let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+				let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
				if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
					// Now that we've removed all the in-flight monitor updates for channels that are
					// still open, we need to replay any monitor updates that are for closed channels,
					// creating the necessary peer_state entries as we go.
					let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
-						Mutex::new(peer_state_from_chans(HashMap::new()))
+						Mutex::new(peer_state_from_chans(new_hash_map()))
					});
					let mut peer_state = peer_state_mutex.lock().unwrap();
					handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, funding_txo, monitor, peer_state, logger, "closed ");
				} else {
					log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it!
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is missing.", - &funding_txo.to_channel_id()); + log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) = + channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); + log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates); return Err(DecodeError::InvalidValue); } } @@ -10748,8 +11671,8 @@ where for (_, monitor) in args.channel_monitors.iter() { let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0); if counterparty_opt.is_none() { - let logger = WithChannelMonitor::from(&args.logger, monitor); for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() { + let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source { if path.hops.is_empty() { log_error!(logger, "Got an empty path for a pending payment"); @@ -10771,7 +11694,7 @@ where retry_strategy: None, attempts: PaymentAttempts::new(), payment_params: None, - session_privs: [session_priv_bytes].iter().map(|a| *a).collect(), + session_privs: hash_set_from_iter([session_priv_bytes]), payment_hash: htlc.payment_hash, payment_secret: None, // only used for retries, and we'll never retry on startup payment_metadata: None, // only used for retries, and we'll never retry on startup @@ -10790,6 +11713,7 @@ where } } for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() { + let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { @@ -10801,12 +11725,24 @@ where // still have an entry for this HTLC in `forward_htlcs` or // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. 
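// The shape of the de-duplication in the retain() calls that follow, as a
// standalone sketch with simplified stand-in types: a queued HTLC is dropped
// when its (short_channel_id, htlc_id) pair matches the previous hop already
// recorded by a ChannelMonitor, i.e. it was already forwarded before the
// restart; the real code matches on `HTLCPreviousHopData`.
struct PrevHop {
	short_channel_id: u64,
	htlc_id: u64,
}

fn drop_already_forwarded(queued: &mut Vec<(u64, u64)>, prev_hop: &PrevHop) {
	// Keep only entries that do not match the already-forwarded HTLC.
	queued.retain(|&(scid, htlc_id)| {
		scid != prev_hop.short_channel_id || htlc_id != prev_hop.htlc_id
	});
}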
+ decode_update_add_htlcs.retain(|scid, update_add_htlcs| { + update_add_htlcs.retain(|update_add_htlc| { + let matches = *scid == prev_hop_data.short_channel_id && + update_add_htlc.htlc_id == prev_hop_data.htlc_id; + if matches { + log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + } + !matches + }); + !update_add_htlcs.is_empty() + }); forward_htlcs.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); + &htlc.payment_hash, &monitor.channel_id()); false } else { true } } else { true } @@ -10816,7 +11752,7 @@ where pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); + &htlc.payment_hash, &monitor.channel_id()); pending_events_read.retain(|(event, _)| { if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { intercepted_id != ev_id @@ -10840,6 +11776,7 @@ where let compl_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint: monitor.get_funding_txo().0, + channel_id: monitor.channel_id(), counterparty_node_id: path.hops[0].pubkey, }; pending_outbounds.claim_htlc(payment_id, preimage, session_priv, @@ -10865,7 +11802,7 @@ where // channel_id -> peer map entry). counterparty_opt.is_none(), counterparty_opt.cloned().or(monitor.get_counterparty_node_id()), - monitor.get_funding_txo().0)) + monitor.get_funding_txo().0, monitor.channel_id())) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -10881,7 +11818,7 @@ where } } - if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() { + if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() { // If we have pending HTLCs to forward, assume we either dropped a // `PendingHTLCsForwardable` or the user received it but never processed it as they // shut down before the timer hit. 
Either way, set the time_forwardable to a small @@ -10895,7 +11832,7 @@ where let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); - let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len()); + let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); if let Some(purposes) = claimable_htlc_purposes { if purposes.len() != claimable_htlcs_list.len() { return Err(DecodeError::InvalidValue); @@ -10930,7 +11867,7 @@ where let purpose = match &htlcs[0].onion_payload { OnionPayload::Invoice { _legacy_hop_data } => { if let Some(hop_data) = _legacy_hop_data { - events::PaymentPurpose::InvoicePayment { + events::PaymentPurpose::Bolt11InvoicePayment { payment_preimage: match pending_inbound_payments.get(&payment_hash) { Some(inbound_payment) => inbound_payment.payment_preimage, None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) { @@ -10968,13 +11905,13 @@ where } } - let mut outbound_scid_aliases = HashSet::new(); + let mut outbound_scid_aliases = new_hash_set(); for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (chan_id, phase) in peer_state.channel_by_id.iter_mut() { if let ChannelPhase::Funded(chan) = phase { - let logger = WithChannelContext::from(&args.logger, &chan.context); + let logger = WithChannelContext::from(&args.logger, &chan.context, None); if chan.context.outbound_scid_alias() == 0 { let mut outbound_scid_alias; loop { @@ -11038,13 +11975,13 @@ where // this channel as well. On the flip side, there's no harm in restarting // without the new monitor persisted - we'll end up right back here on // restart. 
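// The alias-assignment loop opened just above (its body falls outside this
// hunk's context) follows a standard draw-until-unique pattern; a standalone
// sketch, with `draw` standing in for fake_scid::Namespace::get_fake_scid():
use std::collections::HashSet;

fn pick_unique_alias(used: &mut HashSet<u64>, mut draw: impl FnMut() -> u64) -> u64 {
	loop {
		let candidate = draw();
		// The HashSet insert doubles as the collision check: it returns false
		// if the candidate alias is already in use.
		if used.insert(candidate) {
			return candidate;
		}
	}
}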
@@ -11038,13 +11975,13 @@ where
 							// this channel as well. On the flip side, there's no harm in restarting
 							// without the new monitor persisted - we'll end up right back here on
 							// restart.
-							let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+							let previous_channel_id = claimable_htlc.prev_hop.channel_id;
 							if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
 								let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
 								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 								let peer_state = &mut *peer_state_lock;
 								if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
-									let logger = WithChannelContext::from(&args.logger, &channel.context);
+									let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
 									channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
 								}
 							}
@@ -11059,6 +11996,7 @@ where
 							amount_msat: claimable_amt_msat,
 							htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
 							sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
+							onion_fields: payment.onion_fields,
 						}, None));
 					}
 				}
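The `WithChannelContext::from` and `WithContext::from` calls throughout this section gain a trailing `Option<PaymentHash>` argument, so log lines can be correlated with a specific payment where one is known: `Some(payment_hash)` in the claim path above, `None` elsewhere. A minimal sketch of that decorator shape, with simplified stand-in types rather than LDK's:

```rust
trait Logger {
	fn log(&self, msg: &str);
}

#[derive(Clone, Copy)]
struct PaymentHash([u8; 32]);

// Simplified, assumed shape: the real LDK wrapper also carries a peer id and
// uses the crate's Record machinery rather than string prefixes.
struct WithContext<'a, L: Logger> {
	inner: &'a L,
	channel_id: Option<[u8; 32]>,
	payment_hash: Option<PaymentHash>,
}

impl<'a, L: Logger> Logger for WithContext<'a, L> {
	fn log(&self, msg: &str) {
		let mut prefix = String::new();
		if let Some(id) = self.channel_id {
			prefix.push_str(&format!("[chan {:02x}{:02x}..] ", id[0], id[1]));
		}
		if let Some(PaymentHash(h)) = self.payment_hash {
			prefix.push_str(&format!("[payment {:02x}{:02x}..] ", h[0], h[1]));
		}
		self.inner.log(&format!("{prefix}{msg}"));
	}
}
```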
@@ -11067,18 +12005,18 @@ where
 		for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
 			if let Some(peer_state) = per_peer_state.get(&node_id) {
 				for (channel_id, actions) in monitor_update_blocked_actions.iter() {
-					let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
+					let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
 					for action in actions.iter() {
 						if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
 							downstream_counterparty_and_funding_outpoint:
-								Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+								Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
 						} = action {
-							if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+							if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
 								log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
-									blocked_channel_outpoint.to_channel_id());
+									blocked_channel_id);
 								blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
-									.entry(blocked_channel_outpoint.to_channel_id())
+									.entry(*blocked_channel_id)
 									.or_insert_with(Vec::new).push(blocking_action.clone());
 							} else {
 								// If the channel we were blocking has closed, we don't need to
@@ -11095,7 +12033,7 @@ where
 			}
 			peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 		} else {
-			log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
+			log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
 			return Err(DecodeError::InvalidValue);
 		}
 	}
@@ -11115,6 +12053,7 @@ where
 		pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
 		forward_htlcs: Mutex::new(forward_htlcs),
+		decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
 		claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
 		outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
 		outpoint_to_peer: Mutex::new(outpoint_to_peer),
@@ -11143,10 +12082,14 @@ where
 		pending_offers_messages: Mutex::new(Vec::new()),

+		pending_broadcast_messages: Mutex::new(Vec::new()),
+
 		entropy_source: args.entropy_source,
 		node_signer: args.node_signer,
 		signer_provider: args.signer_provider,

+		last_days_feerates: Mutex::new(VecDeque::new()),
+
 		logger: args.logger,
 		default_configuration: args.default_config,
 	};
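The new `pending_broadcast_messages` field initialized above is a `Mutex<Vec<_>>` used to hold gossip broadcasts, such as `channel_update`s for closed channels, while no peer is connected; the `test_channel_update_cached` test added later in this diff exercises exactly that behavior. A toy model of the queue-then-drain pattern, not LDK's types:

```rust
use std::sync::Mutex;

// Broadcasts are buffered while no peer is connected and drained into the
// pending-message-events list once at least one peer is back.
struct BroadcastCache<M> {
	pending: Mutex<Vec<M>>,
}

impl<M> BroadcastCache<M> {
	fn new() -> Self {
		Self { pending: Mutex::new(Vec::new()) }
	}

	fn queue(&self, msg: M) {
		self.pending.lock().unwrap().push(msg);
	}

	// Mirrors the get_and_clear_pending_msg_events semantics the test checks:
	// cached broadcasts are only yielded when there is someone to send them to.
	fn drain_if_connected(&self, connected_peers: usize) -> Vec<M> {
		if connected_peers == 0 {
			return Vec::new();
		}
		self.pending.lock().unwrap().drain(..).collect()
	}
}
```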
@@ -11158,12 +12101,14 @@ where
 		channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 	}

-	for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+	for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
 		// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
 		// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
 		// channel is closed we just assume that it probably came from an on-chain claim.
-		channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-			downstream_closed, true, downstream_node_id, downstream_funding);
+		channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+			downstream_closed, true, downstream_node_id, downstream_funding,
+			downstream_channel_id, None
+		);
 	}

 	//TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -11180,8 +12125,7 @@ mod tests {
 	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
 	use core::sync::atomic::Ordering;
 	use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-	use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-	use crate::ln::ChannelId;
+	use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 	use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs::{self, ErrorAction};
@@ -11672,6 +12616,61 @@ mod tests {
 		}
 	}

+	#[test]
+	fn test_channel_update_cached() {
+		let chanmon_cfgs = create_chanmon_cfgs(3);
+		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
+
+		// Confirm that the channel_update was not sent immediately to node[1] but was cached.
+		let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_1_events.len(), 0);
+
+		{
+			// Assert that ChannelUpdate message has been added to node[0] pending broadcast messages
+			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+			assert_eq!(pending_broadcast_messages.len(), 1);
+		}
+
+		// Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_0_events.len(), 0);
+
+		// Now we reconnect to a peer
+		nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+			features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();

+		// Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
+		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_0_events.len(), 1);
+		match &node_0_events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => (),
+			_ => panic!("Unexpected event"),
+		}
+		{
+			// Assert that ChannelUpdate message has been cleared from nodes[0] pending broadcast messages
+			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+			assert_eq!(pending_broadcast_messages.len(), 0);
+		}
+	}
+
 	#[test]
 	fn test_drop_disconnected_peers_when_removing_channels() {
 		let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -11683,11 +12682,11 @@ mod tests {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-
-		nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+		let error_message = "Channel force-closed";
+		nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);

 		{
 			// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
@@ -11850,8 +12849,8 @@ mod tests {
 		}
 		let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
-		check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
-		check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+		check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 	}

 	fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
@@ -11901,6 +12900,7 @@ mod tests {
 		let channel_id = ChannelId::from_bytes([4; 32]);
 		let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
 		let intercept_id = InterceptId([0; 32]);
+		let error_message = "Channel force-closed";

 		// Test the API functions.
 		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
@@ -11909,9 +12909,9 @@ mod tests {

 		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);

-		check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+		check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);

-		check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+		check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);

 		check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
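Two API changes run through the tests above and below: the force-close methods now take a `String` reason that is delivered to the peer in the resulting error message, and `ClosureReason` became more precise, with `HolderForceClosed` carrying whether the latest commitment transaction was broadcast and the old unified `CooperativeClosure` split by which side initiated the close. A self-contained sketch of matching on the new variants; the enum here is a simplified stand-in for LDK's `events::ClosureReason`, with variant names taken from this diff:

```rust
#[derive(Debug)]
enum ClosureReason {
	HolderForceClosed { broadcasted_latest_txn: Option<bool> },
	LocallyInitiatedCooperativeClosure,
	CounterpartyInitiatedCooperativeClosure,
}

fn describe(reason: &ClosureReason) -> &'static str {
	match reason {
		ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } =>
			"we force-closed and broadcast our latest commitment transaction",
		ClosureReason::HolderForceClosed { .. } =>
			"we force-closed without (or without knowing about) a broadcast",
		ClosureReason::LocallyInitiatedCooperativeClosure =>
			"cooperative close, initiated by us",
		ClosureReason::CounterpartyInitiatedCooperativeClosure =>
			"cooperative close, initiated by our peer",
	}
}
```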
@@ -11933,15 +12933,16 @@ mod tests {

 		// Dummy values
 		let channel_id = ChannelId::from_bytes([4; 32]);
+		let error_message = "Channel force-closed";

 		// Test the API functions.
 		check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));

 		check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);

-		check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+		check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);

-		check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+		check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);

 		check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);

@@ -11983,14 +12984,15 @@ mod tests {
 			check_added_monitors!(nodes[0], 1);
 			expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
 		}
-		open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 	}

 	// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
-	open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+	open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
+		&nodes[0].keys_manager);
 	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
-		open_channel_msg.temporary_channel_id);
+		open_channel_msg.common_fields.temporary_channel_id);

 	// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
 	// it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
@@ -12038,11 +13040,11 @@ mod tests {
 		for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
 			nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
 			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
-			open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 		}
 		nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
 		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
-			open_channel_msg.temporary_channel_id);
+			open_channel_msg.common_fields.temporary_channel_id);

 		// Of course, however, outbound channels are always allowed
 		nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
@@ -12078,14 +13080,14 @@ mod tests {
 		for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
 			nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-			open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 		}

 		// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
 		// rejected.
 		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
-			open_channel_msg.temporary_channel_id);
+			open_channel_msg.common_fields.temporary_channel_id);

 		// but we can still open an outbound channel.
 		nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
@@ -12094,7 +13096,7 @@ mod tests {
 		// but even with such an outbound channel, additional inbound channels will still fail.
 		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
-			open_channel_msg.temporary_channel_id);
+			open_channel_msg.common_fields.temporary_channel_id);
 	}

 	#[test]
@@ -12130,7 +13132,7 @@ mod tests {
 			_ => panic!("Unexpected event"),
 		}
 		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
-		open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 	}

 	// If we try to accept a channel from another peer non-0conf it will fail.
@@ -12152,7 +13154,7 @@ mod tests {
 		_ => panic!("Unexpected event"),
 	}
 	assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
-		open_channel_msg.temporary_channel_id);
+		open_channel_msg.common_fields.temporary_channel_id);

 	// ...however if we accept the same channel 0conf it should work just fine.
 	nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
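The repeated `open_channel_msg.temporary_channel_id` to `open_channel_msg.common_fields.temporary_channel_id` rewrites in these tests (and the `common_fields.channel_type` accesses below) point at a message-struct refactor: fields shared between the V1 `open_channel` and V2 `open_channel2` messages apparently moved into a nested common-fields struct. A rough, illustrative shape only; LDK's real structs carry many more fields:

```rust
// Hypothetical, heavily trimmed shapes for illustration.
struct CommonOpenChannelFields {
	temporary_channel_id: [u8; 32],
	channel_type: Option<Vec<u8>>, // stand-in for ChannelTypeFeatures
}

struct OpenChannel {
	common_fields: CommonOpenChannelFields,
	channel_reserve_satoshis: u64, // an example of a V1-only field
}

fn temporary_channel_id(msg: &OpenChannel) -> [u8; 32] {
	// Shared fields are reached through the nested struct, as in the tests.
	msg.common_fields.temporary_channel_id
}
```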
@@ -12186,7 +13188,7 @@ mod tests {
 		};
 		// Check that if the amount we received + the penultimate hop extra fee is less than the sender
 		// intended amount, we fail the payment.
-		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 		if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) = create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
 			sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
@@ -12206,7 +13208,7 @@ mod tests {
 			}),
 			custom_tlvs: Vec::new(),
 		};
-		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 		assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
 			sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
 			current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
@@ -12219,7 +13221,7 @@ mod tests {
 		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
 		let node = create_network(1, &node_cfg, &node_chanmgr);

-		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
 			sender_intended_htlc_amt_msat: 100,
 			cltv_expiry_height: 22,
@@ -12294,16 +13296,17 @@ mod tests {
 		anchors_config.manually_accept_inbound_channels = true;
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		let error_message = "Channel force-closed";

 		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
 		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

-		assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+		assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());

 		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 		let events = nodes[1].node.get_and_clear_pending_events();
 		match events[0] {
 			Event::OpenChannelRequest { temporary_channel_id, .. } => {
-				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
 			}
 			_ => panic!("Unexpected event"),
 		}
@@ -12312,7 +13315,7 @@ mod tests {
 		nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
 		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

-		assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+		assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());

 		// Since nodes[1] should not have accepted the channel, it should
 		// not have generated any events.
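The `best_block.read().unwrap().height()` to `.height` changes above, and the bench fix-ups below, track rust-bitcoin's move to typed fields: `BestBlock` exposes `block_hash` and `height` as plain fields, `Transaction::version` takes a `Version` newtype, and `TxOut::value` takes an `Amount` instead of a bare integer. A compiling sketch of the new construction, assuming rust-bitcoin 0.32 paths:

```rust
use bitcoin::absolute::LockTime;
use bitcoin::transaction::Version;
use bitcoin::{Amount, ScriptBuf, Transaction, TxOut};

// Mirrors the funding-transaction construction in the bench below, with the
// typed Version and Amount fields the diff switches to.
fn dummy_funding_tx(script_pubkey: ScriptBuf) -> Transaction {
	Transaction {
		version: Version::TWO,
		lock_time: LockTime::ZERO,
		input: Vec::new(),
		output: vec![TxOut { value: Amount::from_sat(8_000_000), script_pubkey }],
	}
}
```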
@@ -12411,15 +13414,16 @@ mod tests {
 		let user_config = test_default_channel_config();
 		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
 		let nodes = create_network(2, &node_cfg, &node_chanmgr);
+		let error_message = "Channel force-closed";

 		// Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
 		let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_broadcast(&nodes[0], 1, true);
 		check_added_monitors(&nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
 		{
 			let txn = nodes[0].tx_broadcaster.txn_broadcast();
 			assert_eq!(txn.len(), 1);
@@ -12501,7 +13505,7 @@ mod tests {

 		let (scid_1, scid_2) = (42, 43);
-		let mut forward_htlcs = HashMap::new();
+		let mut forward_htlcs = new_hash_map();
 		forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
 		forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
@@ -12537,10 +13541,12 @@ pub mod bench {
 	use crate::util::test_utils;
 	use crate::util::config::{UserConfig, MaxDustHTLCExposure};

+	use bitcoin::amount::Amount;
 	use bitcoin::blockdata::locktime::absolute::LockTime;
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use bitcoin::{Transaction, TxOut};
+	use bitcoin::transaction::Version;

 	use crate::sync::{Arc, Mutex, RwLock};

@@ -12617,8 +13623,8 @@ pub mod bench {
 		let tx;
 		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
-			tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-				value: 8_000_000, script_pubkey: output_script,
+			tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+				value: Amount::from_sat(8_000_000), script_pubkey: output_script,
 			}]};
 			node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
 		} else { panic!(); }
@@ -12645,7 +13651,7 @@ pub mod bench {

 		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);

-		let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
+		let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
 		Listen::block_connected(&node_a, &block, 1);
 		Listen::block_connected(&node_b, &block, 1);