diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 51b3fc15..b14b6e60 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -21,7 +21,7 @@ use bitcoin::blockdata::block::Header;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::key::constants::SECRET_KEY_SIZE;
-use bitcoin::network::constants::Network;
+use bitcoin::network::Network;
 
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -31,6 +31,7 @@ use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::{secp256k1, Sequence};
 
+use crate::blinded_path::message::{MessageContext, OffersContext};
 use crate::blinded_path::{BlindedPath, NodeIdLookUp};
 use crate::blinded_path::message::ForwardNode;
 use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
@@ -46,7 +47,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 use crate::ln::inbound_payment;
 use crate::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
-pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
+use crate::ln::channel_state::ChannelDetails;
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -58,7 +59,7 @@ use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
 use crate::ln::wire::Encode;
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
 use crate::offers::invoice_error::InvoiceError;
@@ -66,6 +67,7 @@ use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
 use crate::offers::offer::{Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
 use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
 use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
 use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
@@ -105,7 +107,7 @@ use core::time::Duration;
 use
core::ops::Deref;
 
 // Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+pub use crate::ln::outbound_payment::{Bolt12PaymentError, PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
 use crate::ln::script::ShutdownScript;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
@@ -306,6 +308,7 @@ pub(super) struct PendingAddHTLCInfo {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	prev_short_channel_id: u64,
 	prev_htlc_id: u64,
+	prev_counterparty_node_id: Option<PublicKey>,
 	prev_channel_id: ChannelId,
 	prev_funding_outpoint: OutPoint,
 	prev_user_channel_id: u128,
@@ -349,9 +352,10 @@ pub(crate) struct HTLCPreviousHopData {
 	blinded_failure: Option<BlindedFailure>,
 	channel_id: ChannelId,
 
-	// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
+	// These fields are consumed by `claim_funds_from_hop()` when updating a force-closed backwards
 	// channel with a preimage provided by the forward channel.
 	outpoint: OutPoint,
+	counterparty_node_id: Option<PublicKey>,
 }
 
 enum OnionPayload {
@@ -472,7 +476,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 	},
 	(2, OutboundRoute) => {
 		(0, session_priv, required),
-	};
+	},
 );
 
@@ -636,7 +640,7 @@ impl MsgHandleErrInternal {
 				err: msg,
 				action: msgs::ErrorAction::IgnoreError,
 			},
-			ChannelError::Close(msg) => LightningError {
+			ChannelError::Close((msg, _reason)) => LightningError {
 				err: msg.clone(),
 				action: msgs::ErrorAction::SendErrorMessage {
 					msg: msgs::ErrorMessage {
@@ -666,7 +670,7 @@ pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
 /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
 /// they were originally sent). In those cases, this enum is also returned.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Debug)]
 pub(super) enum RAACommitmentOrder {
 	/// Send the CommitmentUpdate messages first
 	CommitmentFirst,
@@ -681,6 +685,7 @@ struct ClaimingPayment {
 	receiver_node_id: PublicKey,
 	htlcs: Vec<events::ClaimedHTLC>,
 	sender_intended_value: Option<u64>,
+	onion_fields: Option<RecipientOnionFields>,
 }
 impl_writeable_tlv_based!(ClaimingPayment, {
 	(0, amount_msat, required),
@@ -688,6 +693,7 @@ impl_writeable_tlv_based!(ClaimingPayment, {
 	(4, receiver_node_id, required),
 	(5, htlcs, optional_vec),
 	(7, sender_intended_value, option),
+	(9, onion_fields, option),
 });
 
 struct ClaimablePayment {
@@ -753,13 +759,55 @@ enum BackgroundEvent {
 	},
 }
 
+/// A pointer to a channel that is unblocked when an event is surfaced
+#[derive(Debug)]
+pub(crate) struct EventUnblockedChannel {
+	counterparty_node_id: PublicKey,
+	funding_txo: OutPoint,
+	channel_id: ChannelId,
+	blocking_action: RAAMonitorUpdateBlockingAction,
+}
+
+impl Writeable for EventUnblockedChannel {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+		self.counterparty_node_id.write(writer)?;
+		self.funding_txo.write(writer)?;
+		self.channel_id.write(writer)?;
+		self.blocking_action.write(writer)
+	}
+}
+
+impl MaybeReadable for EventUnblockedChannel {
+	fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+		let counterparty_node_id = Readable::read(reader)?;
+		let funding_txo = Readable::read(reader)?;
+		let channel_id = Readable::read(reader)?;
+		let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)?
{
+			Some(blocking_action) => blocking_action,
+			None => return Ok(None),
+		};
+		Ok(Some(EventUnblockedChannel {
+			counterparty_node_id,
+			funding_txo,
+			channel_id,
+			blocking_action,
+		}))
+	}
+}
+
 #[derive(Debug)]
 pub(crate) enum MonitorUpdateCompletionAction {
 	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
 	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
 	/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
 	/// event can be generated.
-	PaymentClaimed { payment_hash: PaymentHash },
+	PaymentClaimed {
+		payment_hash: PaymentHash,
+		/// A pending MPP claim which hasn't yet completed.
+		///
+		/// Not written to disk.
+		pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>,
+	},
 	/// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
 	/// operation of another channel.
 	///
@@ -770,7 +818,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// outbound edge.
 	EmitEventAndFreeOtherChannel {
 		event: events::Event,
-		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
+		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
 	},
 	/// Indicates we should immediately resume the operation of another channel, unless there is
 	/// some other reason why the channel is blocked. In practice this simply means immediately
@@ -793,13 +841,16 @@
 }
 
 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
-	(0, PaymentClaimed) => { (0, payment_hash, required) },
+	(0, PaymentClaimed) => {
+		(0, payment_hash, required),
+		(9999999999, pending_mpp_claim, (static_value, None)),
+	},
 	// Note that FreeOtherChannelImmediately should never be written - we were supposed to free
 	// *immediately*. However, for simplicity we implement read/write here.
 	(1, FreeOtherChannelImmediately) => {
 		(0, downstream_counterparty_node_id, required),
 		(2, downstream_funding_outpoint, required),
-		(4, blocking_action, required),
+		(4, blocking_action, upgradable_required),
 		// Note that by the time we get past the required read above, downstream_funding_outpoint will be
 		// filled in, so we can safely unwrap it here.
 		(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
@@ -811,7 +862,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 	// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
 	// support async monitor updates even in LDK 0.0.116 and once we do we'll require no
 	// downgrades to prior versions.
-	(1, downstream_counterparty_and_funding_outpoint, option),
+	(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
 	},
 );
 
@@ -830,9 +881,29 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
 	// Note that by the time we get past the required read above, channel_funding_outpoint will be
 	// filled in, so we can safely unwrap it here.
 		(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
-	};
+	}
 );
 
+#[derive(Debug)]
+pub(crate) struct PendingMPPClaim {
+	channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId, u64)>,
+	channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
+}
+
+#[derive(Clone)]
+pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
+
+impl PartialEq for PendingMPPClaimPointer {
+	fn eq(&self, o: &Self) -> bool { Arc::ptr_eq(&self.0, &o.0) }
+}
+impl Eq for PendingMPPClaimPointer {}
+
+impl core::fmt::Debug for PendingMPPClaimPointer {
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+		self.0.lock().unwrap().fmt(f)
+	}
+}
+
 #[derive(Clone, PartialEq, Eq, Debug)]
 /// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
 /// the blocked action here. See enum variants for more info.
@@ -846,6 +917,16 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
 		/// The HTLC ID on the inbound edge.
 		htlc_id: u64,
 	},
+	/// We claimed an MPP payment across multiple channels. We have to block removing the payment
+	/// preimage from any monitor until the last monitor is updated to contain the payment
+	/// preimage. Otherwise we may not be able to replay the preimage on the monitor(s) that
+	/// weren't updated on startup.
+	///
+	/// This variant is *not* written to disk, instead being inferred from [`ChannelMonitor`]
+	/// state.
+	ClaimedMPPPayment {
+		pending_claim: PendingMPPClaimPointer,
+	}
 }
 
 impl RAAMonitorUpdateBlockingAction {
@@ -857,10 +938,16 @@ impl RAAMonitorUpdateBlockingAction {
 	}
 }
 
-impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
-	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) }
-;);
+impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
+	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
+	unread_variants: ClaimedMPPPayment
+);
+impl Readable for Option<RAAMonitorUpdateBlockingAction> {
+	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
+	}
+}
 /// State we hold per-peer.
 pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
@@ -960,6 +1047,11 @@ pub(super) struct InboundChannelRequest {
 /// accepted. An unaccepted channel that exceeds this limit will be abandoned.
 const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
 
+/// The number of blocks of historical feerate estimates we keep around and consider when deciding
+/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
+/// after startup before we consider force-closing channels for having too-low fees.
+pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
+
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
/// @@ -1168,7 +1260,7 @@ where /// /// ``` /// use bitcoin::BlockHash; -/// use bitcoin::network::constants::Network; +/// use bitcoin::network::Network; /// use lightning::chain::BestBlock; /// # use lightning::chain::channelmonitor::ChannelMonitor; /// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs}; @@ -1354,11 +1446,12 @@ where /// # /// # fn example(channel_manager: T) { /// # let channel_manager = channel_manager.get_cm(); +/// # let error_message = "Channel force-closed"; /// channel_manager.process_pending_events(&|event| match event { /// Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { /// if !is_trusted(counterparty_node_id) { /// match channel_manager.force_close_without_broadcasting_txn( -/// &temporary_channel_id, &counterparty_node_id +/// &temporary_channel_id, &counterparty_node_id, error_message.to_string() /// ) { /// Ok(()) => println!("Rejecting channel {}", temporary_channel_id), /// Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e), @@ -1551,8 +1644,9 @@ where /// # /// # fn example(channel_manager: T) -> Result<(), Bolt12SemanticError> { /// # let channel_manager = channel_manager.get_cm(); +/// # let absolute_expiry = None; /// let offer = channel_manager -/// .create_offer_builder()? +/// .create_offer_builder(absolute_expiry)? /// # ; /// # // Needed for compiling for c_bindings /// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into(); @@ -2094,6 +2188,21 @@ where /// Tracks the message events that are to be broadcasted when we are connected to some peer. pending_broadcast_messages: Mutex>, + /// We only want to force-close our channels on peers based on stale feerates when we're + /// confident the feerate on the channel is *really* stale, not just became stale recently. + /// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks + /// (after startup completed) here, and only force-close when channels have a lower feerate + /// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks. + /// + /// We only keep this in memory as we assume any feerates we receive immediately after startup + /// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any + /// actions for a day anyway. + /// + /// The first element in the pair is the + /// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the + /// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate. + last_days_feerates: Mutex>, + entropy_source: ES, node_signer: NS, signer_provider: SP, @@ -2284,337 +2393,18 @@ const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50; /// many peers we reject new (inbound) connections. const MAX_NO_CHANNEL_PEERS: usize = 250; -/// Information needed for constructing an invoice route hint for this channel. -#[derive(Clone, Debug, PartialEq)] -pub struct CounterpartyForwardingInfo { - /// Base routing fee in millisatoshis. - pub fee_base_msat: u32, - /// Amount in millionths of a satoshi the channel will charge per transferred satoshi. - pub fee_proportional_millionths: u32, - /// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart, - /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s - /// `cltv_expiry_delta` for more details. - pub cltv_expiry_delta: u16, -} - -/// Channel parameters which apply to our counterparty. 
These are split out from [`ChannelDetails`] -/// to better separate parameters. -#[derive(Clone, Debug, PartialEq)] -pub struct ChannelCounterparty { - /// The node_id of our counterparty - pub node_id: PublicKey, - /// The Features the channel counterparty provided upon last connection. - /// Useful for routing as it is the most up-to-date copy of the counterparty's features and - /// many routing-relevant features are present in the init context. - pub features: InitFeatures, - /// The value, in satoshis, that must always be held in the channel for our counterparty. This - /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by - /// claiming at least this value on chain. - /// - /// This value is not included in [`inbound_capacity_msat`] as it can never be spent. - /// - /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat - pub unspendable_punishment_reserve: u64, - /// Information on the fees and requirements that the counterparty requires when forwarding - /// payments to us through this channel. - pub forwarding_info: Option, - /// The smallest value HTLC (in msat) the remote peer will accept, for this channel. This field - /// is only `None` before we have received either the `OpenChannel` or `AcceptChannel` message - /// from the remote peer, or for `ChannelCounterparty` objects serialized prior to LDK 0.0.107. - pub outbound_htlc_minimum_msat: Option, - /// The largest value HTLC (in msat) the remote peer currently will accept, for this channel. - pub outbound_htlc_maximum_msat: Option, -} - -/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`] -#[derive(Clone, Debug, PartialEq)] -pub struct ChannelDetails { - /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes, - /// thereafter this is the txid of the funding transaction xor the funding transaction output). - /// Note that this means this value is *not* persistent - it can change once during the - /// lifetime of the channel. - pub channel_id: ChannelId, - /// Parameters which apply to our counterparty. See individual fields for more information. - pub counterparty: ChannelCounterparty, - /// The Channel's funding transaction output, if we've negotiated the funding transaction with - /// our counterparty already. - pub funding_txo: Option, - /// The features which this channel operates with. See individual features for more info. - /// - /// `None` until negotiation completes and the channel type is finalized. - pub channel_type: Option, - /// The position of the funding transaction in the chain. None if the funding transaction has - /// not yet been confirmed and the channel fully opened. - /// - /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound - /// payments instead of this. See [`get_inbound_payment_scid`]. - /// - /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may - /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`]. 
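To make the SCID fallback rules above concrete, here is a minimal sketch, assuming `ChannelDetails` remains re-exported from `lightning::ln::channelmanager` after its move to `channel_state` (the helper name is illustrative, not part of the API):

```rust
use lightning::ln::channelmanager::ChannelDetails;

/// The SCID a payer should use in an invoice route hint to reach us: the
/// counterparty-issued alias if one exists, otherwise the confirmed SCID.
fn invoice_hint_scid(chan: &ChannelDetails) -> Option<u64> {
	chan.inbound_scid_alias.or(chan.short_channel_id)
}
```

This mirrors the `get_inbound_payment_scid` accessor shown further down.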
- /// - /// [`inbound_scid_alias`]: Self::inbound_scid_alias - /// [`outbound_scid_alias`]: Self::outbound_scid_alias - /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid - /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid - /// [`confirmations_required`]: Self::confirmations_required - pub short_channel_id: Option, - /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and - /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when - /// the channel has not yet been confirmed (as long as [`confirmations_required`] is - /// `Some(0)`). - /// - /// This will be `None` as long as the channel is not available for routing outbound payments. - /// - /// [`short_channel_id`]: Self::short_channel_id - /// [`confirmations_required`]: Self::confirmations_required - pub outbound_scid_alias: Option, - /// An optional [`short_channel_id`] alias for this channel, randomly generated by our - /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our - /// counterparty will recognize the alias provided here in place of the [`short_channel_id`] - /// when they see a payment to be routed to us. - /// - /// Our counterparty may choose to rotate this value at any time, though will always recognize - /// previous values for inbound payment forwarding. - /// - /// [`short_channel_id`]: Self::short_channel_id - pub inbound_scid_alias: Option, - /// The value, in satoshis, of this channel as appears in the funding output - pub channel_value_satoshis: u64, - /// The value, in satoshis, that must always be held in the channel for us. This value ensures - /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least - /// this value on chain. - /// - /// This value is not included in [`outbound_capacity_msat`] as it can never be spent. - /// - /// This value will be `None` for outbound channels until the counterparty accepts the channel. - /// - /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat - pub unspendable_punishment_reserve: Option, - /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects - /// serialized with LDK versions prior to 0.0.113. - /// - /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel - /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels - pub user_channel_id: u128, - /// The currently negotiated fee rate denominated in satoshi per 1000 weight units, - /// which is applied to commitment and HTLC transactions. - /// - /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115. - pub feerate_sat_per_1000_weight: Option, - /// Our total balance. This is the amount we would get if we close the channel. - /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this - /// amount is not likely to be recoverable on close. 
- /// - /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose - /// balance is not available for inclusion in new outbound HTLCs). This further does not include - /// any pending outgoing HTLCs which are awaiting some other resolution to be sent. - /// This does not consider any on-chain fees. - /// - /// See also [`ChannelDetails::outbound_capacity_msat`] - pub balance_msat: u64, - /// The available outbound capacity for sending HTLCs to the remote peer. This does not include - /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not - /// available for inclusion in new outbound HTLCs). This further does not include any pending - /// outgoing HTLCs which are awaiting some other resolution to be sent. - /// - /// See also [`ChannelDetails::balance_msat`] - /// - /// This value is not exact. Due to various in-flight changes, feerate changes, and our - /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we - /// should be able to spend nearly this amount. - pub outbound_capacity_msat: u64, - /// The available outbound capacity for sending a single HTLC to the remote peer. This is - /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by - /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us - /// to use a limit as close as possible to the HTLC limit we can currently send. - /// - /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`], - /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`]. - pub next_outbound_htlc_limit_msat: u64, - /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of - /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than - /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a - /// route which is valid. - pub next_outbound_htlc_minimum_msat: u64, - /// The available inbound capacity for the remote peer to send HTLCs to us. This does not - /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not - /// available for inclusion in new inbound HTLCs). - /// Note that there are some corner cases not fully handled here, so the actual available - /// inbound capacity may be slightly higher than this. - /// - /// This value is not exact. Due to various in-flight changes, feerate changes, and our - /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable. - /// However, our counterparty should be able to spend nearly this amount. - pub inbound_capacity_msat: u64, - /// The number of required confirmations on the funding transaction before the funding will be - /// considered "locked". This number is selected by the channel fundee (i.e. us if - /// [`is_outbound`] is *not* set), and can be selected for inbound channels with - /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with - /// [`ChannelHandshakeLimits::max_minimum_depth`]. - /// - /// This value will be `None` for outbound channels until the counterparty accepts the channel. 
- /// - /// [`is_outbound`]: ChannelDetails::is_outbound - /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth - /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth - pub confirmations_required: Option, - /// The current number of confirmations on the funding transaction. - /// - /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113. - pub confirmations: Option, - /// The number of blocks (after our commitment transaction confirms) that we will need to wait - /// until we can claim our funds after we force-close the channel. During this time our - /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty - /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any - /// time to claim our non-HTLC-encumbered funds. - /// - /// This value will be `None` for outbound channels until the counterparty accepts the channel. - pub force_close_spend_delay: Option, - /// True if the channel was initiated (and thus funded) by us. - pub is_outbound: bool, - /// True if the channel is confirmed, channel_ready messages have been exchanged, and the - /// channel is not currently being shut down. `channel_ready` message exchange implies the - /// required confirmation count has been reached (and we were connected to the peer at some - /// point after the funding transaction received enough confirmations). The required - /// confirmation count is provided in [`confirmations_required`]. - /// - /// [`confirmations_required`]: ChannelDetails::confirmations_required - pub is_channel_ready: bool, - /// The stage of the channel's shutdown. - /// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116. - pub channel_shutdown_state: Option, - /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b) - /// the peer is connected, and (c) the channel is not currently negotiating a shutdown. - /// - /// This is a strict superset of `is_channel_ready`. - pub is_usable: bool, - /// True if this channel is (or will be) publicly-announced. - pub is_public: bool, - /// The smallest value HTLC (in msat) we will accept, for this channel. This field - /// is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.107 - pub inbound_htlc_minimum_msat: Option, - /// The largest value HTLC (in msat) we currently will accept, for this channel. - pub inbound_htlc_maximum_msat: Option, - /// Set of configurable parameters that affect channel operation. - /// - /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109. - pub config: Option, - /// Pending inbound HTLCs. - /// - /// This field is empty for objects serialized with LDK versions prior to 0.0.122. - pub pending_inbound_htlcs: Vec, - /// Pending outbound HTLCs. - /// - /// This field is empty for objects serialized with LDK versions prior to 0.0.122. - pub pending_outbound_htlcs: Vec, -} - -impl ChannelDetails { - /// Gets the current SCID which should be used to identify this channel for inbound payments. - /// This should be used for providing invoice hints or in any other context where our - /// counterparty will forward a payment to us. - /// - /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the - /// [`ChannelDetails::short_channel_id`]. See those for more information. 
- pub fn get_inbound_payment_scid(&self) -> Option { - self.inbound_scid_alias.or(self.short_channel_id) - } - - /// Gets the current SCID which should be used to identify this channel for outbound payments. - /// This should be used in [`Route`]s to describe the first hop or in other contexts where - /// we're sending or forwarding a payment outbound over this channel. - /// - /// This is either the [`ChannelDetails::short_channel_id`], if set, or the - /// [`ChannelDetails::outbound_scid_alias`]. See those for more information. - pub fn get_outbound_payment_scid(&self) -> Option { - self.short_channel_id.or(self.outbound_scid_alias) - } - - fn from_channel_context( - context: &ChannelContext, best_block_height: u32, latest_features: InitFeatures, - fee_estimator: &LowerBoundedFeeEstimator - ) -> Self - where - SP::Target: SignerProvider, - F::Target: FeeEstimator - { - let balance = context.get_available_balances(fee_estimator); - let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - context.get_holder_counterparty_selected_channel_reserve_satoshis(); - ChannelDetails { - channel_id: context.channel_id(), - counterparty: ChannelCounterparty { - node_id: context.get_counterparty_node_id(), - features: latest_features, - unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: context.counterparty_forwarding_info(), - // Ensures that we have actually received the `htlc_minimum_msat` value - // from the counterparty through the `OpenChannel` or `AcceptChannel` - // message (as they are always the first message from the counterparty). - // Else `Channel::get_counterparty_htlc_minimum_msat` could return the - // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if context.have_received_message() { - Some(context.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(), - }, - funding_txo: context.get_funding_txo(), - // Note that accept_channel (or open_channel) is always the first message, so - // `have_received_message` indicates that type negotiation has completed. 
- channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None }, - short_channel_id: context.get_short_channel_id(), - outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None }, - inbound_scid_alias: context.latest_inbound_scid_alias(), - channel_value_satoshis: context.get_value_satoshis(), - feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()), - unspendable_punishment_reserve: to_self_reserve_satoshis, - balance_msat: balance.balance_msat, - inbound_capacity_msat: balance.inbound_capacity_msat, - outbound_capacity_msat: balance.outbound_capacity_msat, - next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat, - user_channel_id: context.get_user_id(), - confirmations_required: context.minimum_depth(), - confirmations: Some(context.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: context.get_counterparty_selected_contest_delay(), - is_outbound: context.is_outbound(), - is_channel_ready: context.is_usable(), - is_usable: context.is_live(), - is_public: context.should_announce(), - inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(), - config: Some(context.config()), - channel_shutdown_state: Some(context.shutdown_state()), - pending_inbound_htlcs: context.get_pending_inbound_htlc_details(), - pending_outbound_htlcs: context.get_pending_outbound_htlc_details(), - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -/// Further information on the details of the channel shutdown. -/// Upon channels being forced closed (i.e. commitment transaction confirmation detected -/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or -/// the channel will be removed shortly. -/// Also note, that in normal operation, peers could disconnect at any of these states -/// and require peer re-connection before making progress onto other states -pub enum ChannelShutdownState { - /// Channel has not sent or received a shutdown message. - NotShuttingDown, - /// Local node has sent a shutdown message for this channel. - ShutdownInitiated, - /// Shutdown message exchanges have concluded and the channels are in the midst of - /// resolving all existing open HTLCs before closing can continue. - ResolvingHTLCs, - /// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates. - NegotiatingClosingFee, - /// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about - /// to drop the channel. - ShutdownComplete, -} +/// The maximum expiration from the current time where an [`Offer`] or [`Refund`] is considered +/// short-lived, while anything with a greater expiration is considered long-lived. +/// +/// Using [`ChannelManager::create_offer_builder`] or [`ChannelManager::create_refund_builder`], +/// will included a [`BlindedPath`] created using: +/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and +/// - [`MessageRouter::create_blinded_paths`] when long-lived. +/// +/// Using compact [`BlindedPath`]s may provide better privacy as the [`MessageRouter`] could select +/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to +/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use. 
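The constant introduced just below pins this cutoff at one day. A minimal sketch of the rule it implies, with an illustrative helper and a caller-supplied `now` (duration since the Unix epoch, neither of which is part of the API):

```rust
use core::time::Duration;

const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);

/// Whether an offer or refund expiring at `absolute_expiry` is short-lived
/// relative to `now`, and thus a candidate for compact, SCID-based paths.
fn is_short_lived(absolute_expiry: Option<Duration>, now: Duration) -> bool {
	// No expiry means the offer never expires, i.e. it is long-lived.
	absolute_expiry.map_or(false, |expiry| {
		expiry <= now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY)
	})
}
```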
+pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24); /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments. /// These include payments that have yet to find a successful path, or have unresolved HTLCs. @@ -2761,11 +2551,10 @@ macro_rules! convert_chan_phase_err { ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id)) }, - ChannelError::Close(msg) => { + ChannelError::Close((msg, reason)) => { let logger = WithChannelContext::from(&$self.logger, &$channel.context, None); log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg); update_maps_on_chan_removal!($self, $channel.context); - let reason = ClosureReason::ProcessingError { err: msg.clone() }; let shutdown_res = $channel.context.force_shutdown(true, reason); let err = MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update); @@ -3191,6 +2980,8 @@ where pending_offers_messages: Mutex::new(Vec::new()), pending_broadcast_messages: Mutex::new(Vec::new()), + last_days_feerates: Mutex::new(VecDeque::new()), + entropy_source, node_signer, signer_provider, @@ -3283,7 +3074,7 @@ where let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id) + self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger) { Ok(res) => res, Err(e) => { @@ -3482,7 +3273,7 @@ where } } else { let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); - shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed)); + shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) })); } }, hash_map::Entry::Vacant(_) => { @@ -3651,7 +3442,7 @@ where let closure_reason = if let Some(peer_msg) = peer_msg { ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) } } else { - ClosureReason::HolderForceClosed + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) } }; let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) { @@ -3698,8 +3489,11 @@ where Ok(counterparty_node_id) } - fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { + fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String) + -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + log_debug!(self.logger, + "Force-closing channel, The error message sent to the peer : {}", error_message); match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -3708,8 +3502,8 @@ where peer_state.pending_msg_events.push( events::MessageSendEvent::HandleError { node_id: counterparty_node_id, - action: 
msgs::ErrorAction::DisconnectPeer { - msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }) + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message } }, } ); @@ -3720,39 +3514,53 @@ where } } - /// Force closes a channel, immediately broadcasting the latest local transaction(s) and - /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to - /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding - /// channel. - pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) + /// Force closes a channel, immediately broadcasting the latest local transaction(s), + /// rejecting new HTLCs. + /// + /// The provided `error_message` is sent to connected peers for closing + /// channels and should be a human-readable description of what went wrong. + /// + /// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id` + /// isn't the counterparty of the corresponding channel. + pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String) -> Result<(), APIError> { - self.force_close_sending_error(channel_id, counterparty_node_id, true) + self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message) } /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting - /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the - /// `counterparty_node_id` isn't the counterparty of the corresponding channel. + /// the latest local transaction(s). + /// + /// The provided `error_message` is sent to connected peers for closing channels and should + /// be a human-readable description of what went wrong. /// + /// Fails if `channel_id` is unknown to the manager, or if the + /// `counterparty_node_id` isn't the counterparty of the corresponding channel. /// You can always broadcast the latest local transaction(s) via /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. - pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) + pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String) -> Result<(), APIError> { - self.force_close_sending_error(channel_id, counterparty_node_id, false) + self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message) } /// Force close all channels, immediately broadcasting the latest local commitment transaction /// for each to the chain and rejecting new HTLCs on each. - pub fn force_close_all_channels_broadcasting_latest_txn(&self) { + /// + /// The provided `error_message` is sent to connected peers for closing channels and should + /// be a human-readable description of what went wrong. + pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) { for chan in self.list_channels() { - let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id); + let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone()); } } /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest /// local transaction(s). 
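Call sites migrate mechanically to the new signatures; a sketch in the style of this file's doc examples, with all identifiers as placeholders:

```rust
# use bitcoin::secp256k1::PublicKey;
# use lightning::ln::channelmanager::AChannelManager;
# use lightning::ln::types::ChannelId;
# fn example<T: AChannelManager>(channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey) {
# let channel_manager = channel_manager.get_cm();
// The error message is relayed to the peer in the resulting `error` message.
let error_message = "Channel force-closed".to_string();
match channel_manager.force_close_broadcasting_latest_txn(
	&channel_id, &counterparty_node_id, error_message
) {
	Ok(()) => println!("Force-closed channel {}", channel_id),
	Err(e) => println!("Error force-closing channel {}: {:?}", channel_id, e),
}
# }
```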
- pub fn force_close_all_channels_without_broadcasting_txn(&self) { + /// + /// The provided `error_message` is sent to connected peers for closing channels and + /// should be a human-readable description of what went wrong. + pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) { for chan in self.list_channels() { - let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id); + let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone()); } } @@ -3782,7 +3590,7 @@ where // peer has been disabled for some time), return `channel_disabled`, // otherwise return `temporary_channel_failure`. let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok(); - if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) { + if chan_update_opt.as_ref().map(|u| u.contents.channel_flags & 2 == 2).unwrap_or(false) { return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt)); } else { return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt)); @@ -4061,7 +3869,8 @@ where chain_hash: self.chain_hash, short_channel_id, timestamp: chan.context.get_update_time_counter(), - flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), + message_flags: 1, // Only must_be_one + channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), cltv_expiry_delta: chan.context.get_cltv_expiry_delta(), htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(), htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(), @@ -4273,14 +4082,43 @@ where self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata); } - pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> { + /// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`. + /// + /// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested + /// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this + /// fails or if the encoded `payment_id` is not recognized. The latter may happen once the + /// payment is no longer tracked because the payment was attempted after: + /// - an invoice for the `payment_id` was already paid, + /// - one full [timer tick] has elapsed since initially requesting the invoice when paying an + /// offer, or + /// - the refund corresponding to the invoice has already expired. + /// + /// To retry the payment, request another invoice using a new `payment_id`. + /// + /// Attempting to pay the same invoice twice while the first payment is still pending will + /// result in a [`Bolt12PaymentError::DuplicateInvoice`]. + /// + /// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] are used to indicate + /// whether or not the payment was successful. 
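With the method now public, paying a received invoice is a single call; a sketch in the style of this file's doc examples (identifiers are placeholders):

```rust
# use lightning::ln::channelmanager::AChannelManager;
# use lightning::offers::invoice::Bolt12Invoice;
# fn example<T: AChannelManager>(channel_manager: T, invoice: &Bolt12Invoice) {
# let channel_manager = channel_manager.get_cm();
// The payment_id is recovered from (and authenticated against) the invoice's
// payer_metadata, so callers no longer pass it explicitly.
match channel_manager.send_payment_for_bolt12_invoice(invoice) {
	Ok(()) => println!("Paying invoice"),
	Err(e) => println!("Failed paying invoice: {:?}", e),
}
# }
```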
+ /// + /// [timer tick]: Self::timer_tick_occurred + pub fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice) -> Result<(), Bolt12PaymentError> { + let secp_ctx = &self.secp_ctx; + let expanded_key = &self.inbound_payment_key; + match invoice.verify(expanded_key, secp_ctx) { + Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id), + Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice), + } + } + + fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments .send_payment_for_bolt12_invoice( invoice, payment_id, &self.router, self.list_usable_channels(), - || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, - best_block_height, &self.logger, &self.pending_events, + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self, + &self.secp_ctx, best_block_height, &self.logger, &self.pending_events, |args| self.send_payment_along_path(args) ) } @@ -4499,10 +4337,9 @@ where Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => { macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { { let counterparty; - let err = if let ChannelError::Close(msg) = $err { + let err = if let ChannelError::Close((msg, reason)) = $err { let channel_id = $chan.context.channel_id(); counterparty = chan.context.get_counterparty_node_id(); - let reason = ClosureReason::ProcessingError { err: msg.clone() }; let shutdown_res = $chan.context.force_shutdown(false, reason); MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None) } else { unreachable!(); }; @@ -4515,7 +4352,7 @@ where match find_funding_output(&chan, &funding_transaction) { Ok(found_funding_txo) => funding_txo = found_funding_txo, Err(err) => { - let chan_err = ChannelError::Close(err.to_owned()); + let chan_err = ChannelError::close(err.to_owned()); let api_err = APIError::APIMisuseError { err: err.to_owned() }; return close_chan!(chan_err, api_err, chan); }, @@ -4633,7 +4470,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut result = Ok(()); - if !funding_transaction.is_coin_base() { + if !funding_transaction.is_coinbase() { for inp in funding_transaction.input.iter() { if inp.witness.is_empty() { result = result.and(Err(APIError::APIMisuseError { @@ -4689,9 +4526,9 @@ where is_batch_funding, |chan, tx| { let mut output_index = None; - let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); + let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh(); for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { + if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() { if output_index.is_some() { return Err("Multiple outputs matched the expected script and value"); } @@ -4941,6 +4778,7 @@ where let mut per_source_pending_forward = [( payment.prev_short_channel_id, + payment.prev_counterparty_node_id, payment.prev_funding_outpoint, payment.prev_channel_id, payment.prev_user_channel_id, @@ -4971,6 +4809,7 @@ where user_channel_id: Some(payment.prev_user_channel_id), outpoint: payment.prev_funding_outpoint, channel_id: payment.prev_channel_id, + counterparty_node_id: payment.prev_counterparty_node_id, 
htlc_id: payment.prev_htlc_id, incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -5100,8 +4939,10 @@ where // Process all of the forwards and failures for the channel in which the HTLCs were // proposed to as a batch. - let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id, - incoming_user_channel_id, htlc_forwards.drain(..).collect()); + let pending_forwards = ( + incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo, + incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect() + ); self.forward_htlcs_without_forward_event(&mut [pending_forwards]); for (htlc_fail, htlc_destination) in htlc_fails.drain(..) { let failure = match htlc_fail { @@ -5135,7 +4976,7 @@ where let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); - let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); + let mut phantom_receives: Vec<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { let mut forward_htlcs = new_hash_map(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); @@ -5144,12 +4985,12 @@ where if short_chan_id != 0 { let mut forwarding_counterparty = None; macro_rules! forwarding_channel_not_found { - () => { - for forward_info in pending_forwards.drain(..) { + ($forward_infos: expr) => { + for forward_info in $forward_infos { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, forward_info: PendingHTLCInfo { + prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, .. 
} @@ -5164,6 +5005,7 @@ where user_channel_id: Some(prev_user_channel_id), channel_id: prev_channel_id, outpoint: prev_funding_outpoint, + counterparty_node_id: prev_counterparty_node_id, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret: $phantom_ss, @@ -5226,7 +5068,10 @@ where outgoing_cltv_value, Some(phantom_shared_secret), false, None, current_height, self.default_configuration.accept_mpp_keysend) { - Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])), + Ok(info) => phantom_receives.push(( + prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, + prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)] + )), Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, @@ -5253,7 +5098,7 @@ where let (counterparty_node_id, forward_chan_id) = match chan_info_opt { Some((cp_id, chan_id)) => (cp_id, chan_id), None => { - forwarding_channel_not_found!(); + forwarding_channel_not_found!(pending_forwards.drain(..)); continue; } }; @@ -5261,103 +5106,156 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); if peer_state_mutex_opt.is_none() { - forwarding_channel_not_found!(); + forwarding_channel_not_found!(pending_forwards.drain(..)); continue; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - for forward_info in pending_forwards.drain(..) { - let queue_fail_htlc_res = match forward_info { - HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, forward_info: PendingHTLCInfo { - incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { - onion_packet, blinded, .. - }, skimmed_fee_msat, .. + let mut draining_pending_forwards = pending_forwards.drain(..); + while let Some(forward_info) = draining_pending_forwards.next() { + let queue_fail_htlc_res = match forward_info { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { + incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, + routing: PendingHTLCRouting::Forward { + ref onion_packet, blinded, .. + }, skimmed_fee_msat, .. + }, + }) => { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), + counterparty_node_id: prev_counterparty_node_id, + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. 
+ phantom_shared_secret: None, + blinded_failure: blinded.map(|b| b.failure), + }); + let next_blinding_point = blinded.and_then(|b| { + let encrypted_tlvs_ss = self.node_signer.ecdh( + Recipient::Node, &b.inbound_blinding_point, None + ).unwrap().secret_bytes(); + onion_utils::next_hop_pubkey( + &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss + ).ok() + }); + + // Forward the HTLC over the most appropriate channel with the corresponding peer, + // applying non-strict forwarding. + // The channel with the least amount of outbound liquidity will be used to maximize the + // probability of being able to successfully forward a subsequent HTLC. + let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase { + ChannelPhase::Funded(chan) => { + let balances = chan.context.get_available_balances(&self.fee_estimator); + if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && + outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat && + chan.context.is_usable() { + Some((chan, balances)) + } else { + None + } }, - }) => { - let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash)); - log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - user_channel_id: Some(prev_user_channel_id), - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. - phantom_shared_secret: None, - blinded_failure: blinded.map(|b| b.failure), - }); - let next_blinding_point = blinded.and_then(|b| { - let encrypted_tlvs_ss = self.node_signer.ecdh( - Recipient::Node, &b.inbound_blinding_point, None - ).unwrap().secret_bytes(); - onion_utils::next_hop_pubkey( - &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss - ).ok() - }); - if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat, - payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator, - &&logger) - { - if let ChannelError::Ignore(msg) = e { - log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg); + _ => None, + }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c); + let optimal_channel = match maybe_optimal_channel { + Some(chan) => chan, + None => { + // Fall back to the specified channel to return an appropriate error. 
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + chan } else { - panic!("Stated return value requirements in send_htlc() were not met"); + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; } + } + }; + + let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash)); + let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) { + "specified" + } else { + "alternate" + }; + log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", + prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); + if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat, + payment_hash, outgoing_cltv_value, htlc_source.clone(), + onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator, + &&logger) + { + if let ChannelError::Ignore(msg) = e { + log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); + } + + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(failure_code, data), HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); - continue; + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; } - None - }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + } + None + }, + HTLCForwardInfo::AddHTLC { .. 
} => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id)) - }, - HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { + Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id)) + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; + } + }, + HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); let res = chan.queue_fail_malformed_htlc( htlc_id, failure_code, sha256_of_onion, &&logger ); Some((res, htlc_id)) - }, - }; - if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { - if let Err(e) = queue_fail_htlc_res { - if let ChannelError::Ignore(msg) = e { + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; + } + }, + }; + if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { + if let Err(e) = queue_fail_htlc_res { + if let ChannelError::Ignore(msg) = e { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); - } else { - panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met"); } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; + } else { + panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met"); } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; } } - } else { - forwarding_channel_not_found!(); - continue; } } else { 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, forward_info: PendingHTLCInfo { + prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, skimmed_fee_msat, .. 
} @@ -5395,6 +5293,7 @@ where prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + counterparty_node_id: prev_counterparty_node_id, channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, @@ -5427,6 +5326,7 @@ where failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, user_channel_id: $htlc.prev_hop.user_channel_id, + counterparty_node_id: $htlc.prev_hop.counterparty_node_id, channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: $htlc.prev_hop.htlc_id, @@ -5809,7 +5709,7 @@ where log_error!(logger, "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id); update_maps_on_chan_removal!(self, &context); - shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed)); + shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) })); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { @@ -6312,7 +6212,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - let mut sources = { + let sources = { let mut claimable_payments = self.claimable_payments.lock().unwrap(); if let Some(payment) = claimable_payments.claimable_payments.remove(&payment_hash) { let mut receiver_node_id = self.our_network_pubkey; @@ -6325,19 +6225,27 @@ where } } - let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); - let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat); - let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash, - ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), - payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value - }); - if dup_purpose.is_some() { - debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); - log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", - &payment_hash); - } + let claiming_payment = claimable_payments.pending_claiming_payments + .entry(payment_hash) + .and_modify(|_| { + debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); + log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", + &payment_hash); + }) + .or_insert_with(|| { + let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); + let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat); + ClaimingPayment { + amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), + payment_purpose: payment.purpose, + receiver_node_id, + htlcs, + sender_intended_value, + onion_fields: payment.onion_fields, + } + }); - if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = payment.onion_fields { + if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = claiming_payment.onion_fields { if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) { log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}", &payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0))); @@ -6399,26 +6307,46 @@ where return; } if valid_mpp { - for htlc in sources.drain(..) 
{ - let prev_hop_chan_id = htlc.prev_hop.channel_id; - if let Err((pk, err)) = self.claim_funds_from_hop( + let pending_mpp_claim_ptr_opt = if sources.len() > 1 { + let channels_without_preimage = sources.iter().filter_map(|htlc| { + if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + let prev_hop = &htlc.prev_hop; + Some((cp_id, prev_hop.outpoint, prev_hop.channel_id, prev_hop.htlc_id)) + } else { + None + } + }).collect(); + Some(Arc::new(Mutex::new(PendingMPPClaim { + channels_without_preimage, + channels_with_preimage: Vec::new(), + }))) + } else { + None + }; + for htlc in sources { + let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| + if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); + Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr)) + } else { + None + } + ); + let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)), + } + }); + self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_, definitely_duplicate| { debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); - Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }) + (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker) } - ) { - if let msgs::ErrorAction::IgnoreError = err.err.action { - // We got a temporary failure updating monitor, but will claim the - // HTLC when the monitor updating is restored (or on chain). - let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash)); - log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); - } else { errs.push((pk, err)); } - } + ); } - } - if !valid_mpp { - for htlc in sources.drain(..) { + } else { + for htlc in sources { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); let source = HTLCSource::PreviousHopData(htlc.prev_hop); @@ -6436,9 +6364,12 @@ where } } - fn claim_funds_from_hop, bool) -> Option>(&self, - prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) - -> Result<(), (PublicKey, MsgHandleErrInternal)> { + fn claim_funds_from_hop< + ComplFunc: FnOnce(Option, bool) -> (Option, Option) + >( + &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, + completion_action: ComplFunc, + ) { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
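The claim loop above threads one shared `PendingMPPClaim` through every part of a multi-part payment so the per-channel RAA blockers can be released together once all parts carry the preimage. A simplified sketch of that bookkeeping, with plain integer ids standing in for the real (PublicKey, ChannelId, HTLC id) tuples and with the funding outpoint omitted:

use std::sync::{Arc, Mutex};

// Simplified stand-in for PendingMPPClaim.
struct PendingMppClaim {
    channels_without_preimage: Vec<(u64, u64)>, // (channel id, htlc id)
    channels_with_preimage: Vec<u64>,           // channel id
}

fn main() {
    let claim = Arc::new(Mutex::new(PendingMppClaim {
        channels_without_preimage: vec![(1, 10), (2, 20)],
        channels_with_preimage: Vec::new(),
    }));

    // Each per-HTLC claim holds a clone of the pointer, as the diff does with
    // PendingMPPClaimPointer(Arc::clone(..)).
    for (chan_id, htlc_id) in [(1u64, 10u64), (2, 20)] {
        let mut state = claim.lock().unwrap();
        let before = state.channels_without_preimage.len();
        state.channels_without_preimage.retain(|&(c, h)| !(c == chan_id && h == htlc_id));
        if state.channels_without_preimage.len() < before {
            state.channels_with_preimage.push(chan_id);
        }
        if state.channels_without_preimage.is_empty() {
            // Every part has its preimage: the RAA blockers recorded for
            // these channels can now be freed.
            println!("unblock channels {:?}", state.channels_with_preimage);
        }
    }
}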
// If we haven't yet run background events assume we're still deserializing and shouldn't @@ -6475,11 +6406,15 @@ where match fulfill_res { UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => { - if let Some(action) = completion_action(Some(htlc_value_msat), false) { + let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false); + if let Some(action) = action_opt { log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}", chan_id, action); peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } + if let Some(raa_blocker) = raa_blocker_opt { + peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker); + } if !during_init { handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); @@ -6497,11 +6432,16 @@ where } } UpdateFulfillCommitFetch::DuplicateClaim {} => { - let action = if let Some(action) = completion_action(None, true) { + let (action_opt, raa_blocker_opt) = completion_action(None, true); + if let Some(raa_blocker) = raa_blocker_opt { + debug_assert!(peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker)); + } + let action = if let Some(action) = action_opt { action } else { - return Ok(()); + return; }; + mem::drop(peer_state_lock); log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", @@ -6516,7 +6456,7 @@ where } else { debug_assert!(false, "Duplicate claims should always free another channel immediately"); - return Ok(()); + return; }; if let Some(peer_state_mtx) = per_peer_state.get(&node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); @@ -6541,7 +6481,7 @@ where } } } - return Ok(()); + return; } } } @@ -6588,8 +6528,46 @@ where // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are // generally always allowed to be duplicative (and it's specifically noted in // `PaymentForwarded`). - self.handle_monitor_update_completion_actions(completion_action(None, false)); - Ok(()) + let (action_opt, raa_blocker_opt) = completion_action(None, false); + + if let Some(raa_blocker) = raa_blocker_opt { + let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| + // prev_hop.counterparty_node_id is always available for payments received after + // LDK 0.0.123, but for those received on 0.0.123 and claimed later, we need to + // look up the counterparty in the `action_opt`, if possible. + action_opt.as_ref().and_then(|action| + if let MonitorUpdateCompletionAction::PaymentClaimed { pending_mpp_claim, .. } = action { + pending_mpp_claim.as_ref().map(|(node_id, _, _, _)| *node_id) + } else { None } + ) + ); + if let Some(counterparty_node_id) = counterparty_node_id { + // TODO: Avoid always blocking the world for the write lock here. 
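The fallback just above prefers the counterparty id stored in the hop data (present for payments received after LDK 0.0.123) and otherwise recovers it from the `PaymentClaimed` action's pending MPP-claim entry. A compact sketch of that `or_else` chain, with simplified stand-in types:

// Sketch of the counterparty-id fallback above; types are stand-ins.
#[derive(Clone, Copy, Debug, PartialEq)]
struct NodeId(u8);

enum CompletionAction {
    PaymentClaimed { pending_mpp_claim: Option<(NodeId, u64, u64)> },
    Other,
}

fn counterparty_for_raa_blocker(
    stored: Option<NodeId>, action: Option<&CompletionAction>,
) -> Option<NodeId> {
    stored.or_else(|| action.and_then(|action| match action {
        CompletionAction::PaymentClaimed { pending_mpp_claim } =>
            pending_mpp_claim.as_ref().map(|(node_id, _, _)| *node_id),
        CompletionAction::Other => None,
    }))
}

fn main() {
    let action = CompletionAction::PaymentClaimed {
        pending_mpp_claim: Some((NodeId(7), 1, 2)),
    };
    // Legacy payment: no stored id, so it is recovered from the action.
    assert_eq!(counterparty_for_raa_blocker(None, Some(&action)), Some(NodeId(7)));
    // Modern payment: the stored id wins.
    assert_eq!(counterparty_for_raa_blocker(Some(NodeId(3)), Some(&action)), Some(NodeId(3)));
    assert_eq!(counterparty_for_raa_blocker(None, Some(&CompletionAction::Other)), None);
}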
+ let mut per_peer_state = self.per_peer_state.write().unwrap(); + let peer_state_mutex = per_peer_state.entry(counterparty_node_id).or_insert_with(|| + Mutex::new(PeerState { + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), + latest_features: InitFeatures::empty(), + pending_msg_events: Vec::new(), + in_flight_monitor_updates: BTreeMap::new(), + monitor_update_blocked_actions: BTreeMap::new(), + actions_blocking_raa_monitor_updates: BTreeMap::new(), + is_connected: false, + })); + let mut peer_state = peer_state_mutex.lock().unwrap(); + + peer_state.actions_blocking_raa_monitor_updates + .entry(prev_hop.channel_id) + .or_insert_with(Vec::new) + .push(raa_blocker); + } else { + debug_assert!(false, + "RAA ChannelMonitorUpdate blockers are only set with PaymentClaimed completion actions, so we should always have a counterparty node id"); + } + } + + self.handle_monitor_update_completion_actions(action_opt); } fn finalize_claims(&self, sources: Vec) { @@ -6622,11 +6600,16 @@ where let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); #[cfg(debug_assertions)] let claiming_chan_funding_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(hop_data, payment_preimage, + self.claim_funds_from_hop(hop_data, payment_preimage, |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = if let Some(node_id) = next_channel_counterparty_node_id { - Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker)) + Some(EventUnblockedChannel { + counterparty_node_id: node_id, + funding_txo: next_channel_outpoint, + channel_id: next_channel_id, + blocking_action: completed_blocker + }) } else { // We can only get `None` here if we are processing a // `ChannelMonitor`-originated event, in which case we @@ -6683,16 +6666,16 @@ where } }), "{:?}", *background_events); } - None + (None, None) } else if definitely_duplicate { if let Some(other_chan) = chan_to_release { - Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id: other_chan.0, - downstream_funding_outpoint: other_chan.1, - downstream_channel_id: other_chan.2, - blocking_action: other_chan.3, - }) - } else { None } + (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: other_chan.counterparty_node_id, + downstream_funding_outpoint: other_chan.funding_txo, + downstream_channel_id: other_chan.channel_id, + blocking_action: other_chan.blocking_action, + }), None) + } else { (None, None) } } else { let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { if let Some(claimed_htlc_value) = htlc_claim_value_msat { @@ -6701,7 +6684,7 @@ where } else { None }; debug_assert!(skimmed_fee_msat <= total_fee_earned_msat, "skimmed_fee_msat must always be included in total_fee_earned_msat"); - Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + (Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { event: events::Event::PaymentForwarded { prev_channel_id: Some(prev_channel_id), next_channel_id: Some(next_channel_id), @@ -6713,13 +6696,9 @@ where outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, downstream_counterparty_and_funding_outpoint: chan_to_release, - }) + }), None) } }); - if let Err((pk, err)) = res { - let result: Result<(), _> = Err(err); - let _ = handle_error!(self, result, pk); - } }, } } @@ -6734,9 +6713,44 @@ where debug_assert_ne!(self.claimable_payments.held_by_thread(), 
LockHeldState::HeldByThread); debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + let mut freed_channels = Vec::new(); + for action in actions.into_iter() { match action { - MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => { + MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { + if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim { + let per_peer_state = self.per_peer_state.read().unwrap(); + per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { + let mut peer_state = peer_state_mutex.lock().unwrap(); + let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); + if let btree_map::Entry::Occupied(mut blockers) = blockers_entry { + blockers.get_mut().retain(|blocker| + if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker { + if *pending_claim == claim_ptr { + let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); + let pending_claim_state = &mut *pending_claim_state_lock; + pending_claim_state.channels_without_preimage.retain(|(cp, outp, cid, hid)| { + if *cp == counterparty_node_id && *cid == chan_id && *hid == htlc_id { + pending_claim_state.channels_with_preimage.push((*cp, *outp, *cid)); + false + } else { true } + }); + if pending_claim_state.channels_without_preimage.is_empty() { + for (cp, outp, cid) in pending_claim_state.channels_with_preimage.iter() { + freed_channels.push((*cp, *outp, *cid, blocker.clone())); + } + } + !pending_claim_state.channels_without_preimage.is_empty() + } else { true } + } else { true } + ); + if blockers.get().is_empty() { + blockers.remove(); + } + } + }); + } + let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); if let Some(ClaimingPayment { amount_msat, @@ -6744,6 +6758,7 @@ where receiver_node_id, htlcs, sender_intended_value: sender_intended_total_msat, + onion_fields, }) = payment { self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed { payment_hash, @@ -6752,6 +6767,7 @@ where receiver_node_id: Some(receiver_node_id), htlcs, sender_intended_total_msat, + onion_fields, }, None)); } }, @@ -6759,8 +6775,11 @@ where event, downstream_counterparty_and_funding_outpoint } => { self.pending_events.lock().unwrap().push_back((event, None)); - if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint { - self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker)); + if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { + self.handle_monitor_update_release( + unblocked.counterparty_node_id, unblocked.funding_txo, + unblocked.channel_id, Some(unblocked.blocking_action), + ); } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { @@ -6775,6 +6794,10 @@ where }, } } + + for (node_id, funding_outpoint, channel_id, blocker) in freed_channels { + self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker)); + } } /// Handles a channel reentering a functional state, either due to reconnect or a monitor @@ -6785,7 +6808,7 @@ where pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option) - -> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + -> (Option<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, 
u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement", &channel.context.channel_id(), @@ -6801,8 +6824,11 @@ where let mut htlc_forwards = None; if !pending_forwards.is_empty() { - htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(), - channel.context.channel_id(), channel.context.get_user_id(), pending_forwards)); + htlc_forwards = Some(( + short_channel_id, Some(channel.context.get_counterparty_node_id()), + channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), + channel.context.get_user_id(), pending_forwards + )); } let mut decode_update_add_htlcs = None; if !pending_update_adds.is_empty() { @@ -6901,7 +6927,7 @@ where log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.", highest_applied_update_id, channel.context.get_latest_monitor_update_id(), remaining_in_flight); - if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id { + if !channel.is_awaiting_monitor_update() || remaining_in_flight != 0 { return; } handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel); @@ -7251,7 +7277,7 @@ where match phase.get_mut() { ChannelPhase::UnfundedOutboundV1(chan) => { try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase); - (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id()) + (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id()) }, _ => { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)); @@ -7304,7 +7330,7 @@ where }, Some(mut phase) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); - let err = ChannelError::Close(err_msg); + let err = ChannelError::close(err_msg); return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) @@ -7319,7 +7345,7 @@ where // `update_maps_on_chan_removal`), we'll remove the existing channel // from `outpoint_to_peer`. Thus, we must first unset the funding outpoint // on the channel. - let err = ChannelError::Close($err.to_owned()); + let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(msg.temporary_channel_id); return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1); } } } @@ -7404,7 +7430,7 @@ where } else { unreachable!(); } Ok(()) } else { - let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned()); + let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned()); // We weren't able to watch the channel to begin with, so no // updates should be made on it. 
Previously, full_stack_target // found an (unreachable) panic when the monitor update contained @@ -7475,7 +7501,7 @@ where Ok(()) } else { - try_chan_phase_entry!(self, Err(ChannelError::Close( + try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry) } }, @@ -7590,7 +7616,7 @@ where (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result) } else { (tx, None, shutdown_result) } } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7690,7 +7716,7 @@ where } try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7734,7 +7760,7 @@ where next_user_channel_id = chan.context.get_user_id(); res } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7765,7 +7791,7 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7788,13 +7814,13 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if (msg.failure_code & 0x8000) == 0 { - let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); + let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry); } if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry); } Ok(()) @@ -7824,7 +7850,7 @@ where } Ok(()) } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7845,15 +7871,15 @@ where } #[inline] - fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) { + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) { let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards); if push_forward_event { self.push_pending_forwards_ev() } } #[inline] - fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: 
&mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool { + fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool { let mut push_forward_event = false; - for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { + for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut new_intercept_events = VecDeque::new(); let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { @@ -7872,7 +7898,9 @@ where match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })); + prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, + prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info + })); }, hash_map::Entry::Vacant(entry) => { if !is_our_scid && forward_info.incoming_amt_msat.is_some() && @@ -7890,7 +7918,9 @@ where intercept_id }, None)); entry.insert(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }); + prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, + prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info + }); }, hash_map::Entry::Occupied(_) => { let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash)); @@ -7898,6 +7928,7 @@ where let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + counterparty_node_id: prev_counterparty_node_id, outpoint: prev_funding_outpoint, channel_id: prev_channel_id, htlc_id: prev_htlc_id, @@ -7917,7 +7948,9 @@ where // payments are being processed. 
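The `forward_htlcs` bookkeeping above leans on the HashMap entry API: append to an existing per-SCID queue, or run the intercept/forwardable-event logic exactly once on first insert, all without a double lookup. A condensed sketch:

use std::collections::hash_map::Entry;
use std::collections::HashMap;

fn main() {
    let mut forward_htlcs: HashMap<u64, Vec<&'static str>> = HashMap::new();
    for (scid, htlc) in [(42u64, "htlc-a"), (42, "htlc-b"), (7, "htlc-c")] {
        match forward_htlcs.entry(scid) {
            Entry::Occupied(mut entry) => entry.get_mut().push(htlc),
            Entry::Vacant(entry) => {
                // First HTLC for this SCID: the real code decides here
                // whether to intercept it or queue a forwardable event.
                entry.insert(vec![htlc]);
            },
        }
    }
    assert_eq!(forward_htlcs[&42], vec!["htlc-a", "htlc-b"]);
}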
push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty; entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }))); + prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, + prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info + }))); } } } @@ -8020,7 +8053,7 @@ where } htlcs_to_fail } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -8046,7 +8079,7 @@ where let logger = WithChannelContext::from(&self.logger, &chan.context, None); try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -8081,7 +8114,7 @@ where update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()), }); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -8119,7 +8152,7 @@ where return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..]; - let msg_from_node_one = msg.contents.flags & 1 == 0; + let msg_from_node_one = msg.contents.channel_flags & 1 == 0; if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersistNoEvents); } else { @@ -8133,7 +8166,7 @@ where } } } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_update for an unfunded channel!".into())), chan_phase_entry); } }, @@ -8195,7 +8228,7 @@ where } need_lnd_workaround } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -8286,7 +8319,7 @@ where let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. 
} = monitor_event { reason } else { - ClosureReason::HolderForceClosed + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } }; failed_channels.push(chan.context.force_shutdown(false, reason.clone())); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { @@ -8394,11 +8427,26 @@ where match phase { ChannelPhase::Funded(chan) => { let msgs = chan.signer_maybe_unblocked(&self.logger); - if let Some(updates) = msgs.commitment_update { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id, - updates, - }); + let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs { + node_id, + updates, + }); + let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK { + node_id, + msg, + }); + match (cu_msg, raa_msg) { + (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => { + pending_msg_events.push(cu); + pending_msg_events.push(raa); + }, + (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => { + pending_msg_events.push(raa); + pending_msg_events.push(cu); + }, + (Some(cu), _) => pending_msg_events.push(cu), + (_, Some(raa)) => pending_msg_events.push(raa), + (_, _) => {}, } if let Some(msg) = msgs.funding_signed { pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { @@ -8542,16 +8590,15 @@ where macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the - /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will - /// not have an expiration unless otherwise set on the builder. + /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's + /// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire. /// /// # Privacy /// - /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer. - /// However, if one is not found, uses a one-hop [`BlindedPath`] with - /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case, - /// the node must be announced, otherwise, there is no way to find a path to the introduction in - /// order to send the [`InvoiceRequest`]. + /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the offer based on the given + /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for + /// privacy implications as well as those of the parameterized [`Router`], which implements + /// [`MessageRouter`]. /// /// Also, uses a derived signing pubkey in the offer for recipient privacy. /// @@ -8566,19 +8613,29 @@ macro_rules! 
create_offer_builder { ($self: ident, $builder: ty) => { /// /// [`Offer`]: crate::offers::offer::Offer /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest - pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> { + pub fn create_offer_builder( + &$self, absolute_expiry: Option + ) -> Result<$builder, Bolt12SemanticError> { let node_id = $self.get_our_node_id(); let expanded_key = &$self.inbound_payment_key; let entropy = &*$self.entropy_source; let secp_ctx = &$self.secp_ctx; - let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; + let path = $self.create_blinded_paths_using_absolute_expiry(OffersContext::Unknown {}, absolute_expiry) + .and_then(|paths| paths.into_iter().next().ok_or(())) + .map_err(|_| Bolt12SemanticError::MissingPaths)?; + let builder = OfferBuilder::deriving_signing_pubkey( node_id, expanded_key, entropy, secp_ctx ) .chain_hash($self.chain_hash) .path(path); + let builder = match absolute_expiry { + None => builder, + Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry), + }; + Ok(builder.into()) } } } @@ -8606,11 +8663,10 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { /// /// # Privacy /// - /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund. - /// However, if one is not found, uses a one-hop [`BlindedPath`] with - /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case, - /// the node must be announced, otherwise, there is no way to find a path to the introduction in - /// order to send the [`Bolt12Invoice`]. + /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the refund based on the given + /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for + /// privacy implications as well as those of the parameterized [`Router`], which implements + /// [`MessageRouter`]. /// /// Also, uses a derived payer id in the refund for payer privacy. /// @@ -8639,7 +8695,11 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { let entropy = &*$self.entropy_source; let secp_ctx = &$self.secp_ctx; - let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; + let context = OffersContext::OutboundPayment { payment_id }; + let path = $self.create_blinded_paths_using_absolute_expiry(context, Some(absolute_expiry)) + .and_then(|paths| paths.into_iter().next().ok_or(())) + .map_err(|_| Bolt12SemanticError::MissingPaths)?; + let builder = RefundBuilder::deriving_payer_id( node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id )? @@ -8660,6 +8720,13 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { } } } +/// Defines the maximum number of [`OffersMessage`] including different reply paths to be sent +/// along different paths. +/// Sending multiple requests increases the chances of successful delivery in case some +/// paths are unavailable. However, only one invoice for a given [`PaymentId`] will be paid, +/// even if multiple invoices are received. +const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10; + impl ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, @@ -8708,10 +8775,9 @@ where /// /// # Privacy /// - /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`] - /// as the introduction node and a derived payer id for payer privacy. As such, currently, the - /// node must be announced. 
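The `OFFERS_MESSAGE_REQUEST_LIMIT` constant introduced in this hunk caps the fan-out used just below when sending invoice requests (and refund invoices): every destination path is paired with every reply path so delivery does not hinge on a single blinded path surviving. A sketch of that capped pairing:

// Sketch of the capped fan-out used below; path names are placeholders.
const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10;

fn main() {
    let offer_paths = ["offer-path-1", "offer-path-2", "offer-path-3"];
    let reply_paths = ["reply-path-1", "reply-path-2"];

    let messages: Vec<(&str, &str)> = reply_paths.iter()
        .flat_map(|reply| offer_paths.iter().map(move |path| (*path, *reply)))
        .take(OFFERS_MESSAGE_REQUEST_LIMIT)
        .collect();

    // 3 destination paths x 2 reply paths = 6 messages, under the cap of 10;
    // only one received invoice per PaymentId will actually be paid.
    assert_eq!(messages.len(), 6);
}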
Otherwise, there is no way to find a path to the introduction node - /// in order to send the [`Bolt12Invoice`]. + /// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`] + /// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the + /// docs of the parameterized [`Router`], which implements [`MessageRouter`]. /// /// # Limitations /// @@ -8762,7 +8828,9 @@ where Some(payer_note) => builder.payer_note(payer_note), }; let invoice_request = builder.build_and_sign()?; - let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; + + let context = OffersContext::OutboundPayment { payment_id }; + let reply_paths = self.create_blinded_paths(context).map_err(|_| Bolt12SemanticError::MissingPaths)?; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -8775,25 +8843,27 @@ where let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); if !offer.paths().is_empty() { - // Send as many invoice requests as there are paths in the offer (with an upper bound). - // Using only one path could result in a failure if the path no longer exists. But only - // one invoice for a given payment id will be paid, even if more than one is received. - const REQUEST_LIMIT: usize = 10; - for path in offer.paths().into_iter().take(REQUEST_LIMIT) { + reply_paths + .iter() + .flat_map(|reply_path| offer.paths().iter().map(move |path| (path, reply_path))) + .take(OFFERS_MESSAGE_REQUEST_LIMIT) + .for_each(|(path, reply_path)| { + let message = new_pending_onion_message( + OffersMessage::InvoiceRequest(invoice_request.clone()), + Destination::BlindedPath(path.clone()), + Some(reply_path.clone()), + ); + pending_offers_messages.push(message); + }); + } else if let Some(signing_pubkey) = offer.signing_pubkey() { + for reply_path in reply_paths { let message = new_pending_onion_message( OffersMessage::InvoiceRequest(invoice_request.clone()), - Destination::BlindedPath(path.clone()), - Some(reply_path.clone()), + Destination::Node(signing_pubkey), + Some(reply_path), ); pending_offers_messages.push(message); } - } else if let Some(signing_pubkey) = offer.signing_pubkey() { - let message = new_pending_onion_message( - OffersMessage::InvoiceRequest(invoice_request), - Destination::Node(signing_pubkey), - Some(reply_path), - ); - pending_offers_messages.push(message); } else { debug_assert!(false); return Err(Bolt12SemanticError::MissingSigningPubkey); @@ -8862,26 +8932,32 @@ where )?; let builder: InvoiceBuilder = builder.into(); let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?; - let reply_path = self.create_blinded_path() + let reply_paths = self.create_blinded_paths(OffersContext::Unknown {}) .map_err(|_| Bolt12SemanticError::MissingPaths)?; let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); if refund.paths().is_empty() { - let message = new_pending_onion_message( - OffersMessage::Invoice(invoice.clone()), - Destination::Node(refund.payer_id()), - Some(reply_path), - ); - pending_offers_messages.push(message); - } else { - for path in refund.paths() { + for reply_path in reply_paths { let message = new_pending_onion_message( OffersMessage::Invoice(invoice.clone()), - Destination::BlindedPath(path.clone()), - Some(reply_path.clone()), + Destination::Node(refund.payer_id()), + Some(reply_path), ); pending_offers_messages.push(message); } + } else { + reply_paths + .iter() + .flat_map(|reply_path| refund.paths().iter().map(move |path| 
(path, reply_path))) + .take(OFFERS_MESSAGE_REQUEST_LIMIT) + .for_each(|(path, reply_path)| { + let message = new_pending_onion_message( + OffersMessage::Invoice(invoice.clone()), + Destination::BlindedPath(path.clone()), + Some(reply_path.clone()), + ); + pending_offers_messages.push(message); + }); } Ok(invoice) @@ -8988,16 +9064,71 @@ where inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key) } - /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`]. + /// Creates a collection of blinded paths by delegating to [`MessageRouter`] based on + /// the path's intended lifetime. + /// + /// Whether or not the path is compact depends on whether the path is short-lived or long-lived, + /// respectively, based on the given `absolute_expiry` as seconds since the Unix epoch. See + /// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. + fn create_blinded_paths_using_absolute_expiry( + &self, context: OffersContext, absolute_expiry: Option, + ) -> Result, ()> { + let now = self.duration_since_epoch(); + let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY); + + if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry { + self.create_compact_blinded_paths(context) + } else { + self.create_blinded_paths(context) + } + } + + pub(super) fn duration_since_epoch(&self) -> Duration { + #[cfg(not(feature = "std"))] + let now = Duration::from_secs( + self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + ); + #[cfg(feature = "std")] + let now = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH"); + + now + } + + /// Creates a collection of blinded paths by delegating to + /// [`MessageRouter::create_blinded_paths`]. /// - /// Errors if the `MessageRouter` errors or returns an empty `Vec`. - fn create_blinded_path(&self) -> Result { + /// Errors if the `MessageRouter` errors. + fn create_blinded_paths(&self, context: OffersContext) -> Result, ()> { let recipient = self.get_our_node_id(); let secp_ctx = &self.secp_ctx; let peers = self.per_peer_state.read().unwrap() .iter() .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap())) + .filter(|(_, peer)| peer.is_connected) + .filter(|(_, peer)| peer.latest_features.supports_onion_messages()) + .map(|(node_id, _)| *node_id) + .collect::>(); + + self.router + .create_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx) + .and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(())) + } + + /// Creates a collection of blinded paths by delegating to + /// [`MessageRouter::create_compact_blinded_paths`]. + /// + /// Errors if the `MessageRouter` errors. 
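The helper above routes short-lived offers onto compact (SCID-compressed) blinded paths and everything longer-lived, including offers with no expiry at all, onto full node-id paths. A standalone sketch of that decision; the constant's value here is illustrative, not LDK's:

use std::time::Duration;

// Illustrative value; the real MAX_SHORT_LIVED_RELATIVE_EXPIRY is defined
// elsewhere in the crate.
const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);

fn should_use_compact_paths(now: Duration, absolute_expiry: Option<Duration>) -> bool {
    let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
    // No expiry is treated as infinitely far away, hence long-lived.
    absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry
}

fn main() {
    let now = Duration::from_secs(1_700_000_000);
    assert!(should_use_compact_paths(now, Some(now + Duration::from_secs(600))));
    assert!(!should_use_compact_paths(now, None));
}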
+ fn create_compact_blinded_paths(&self, context: OffersContext) -> Result, ()> { + let recipient = self.get_our_node_id(); + let secp_ctx = &self.secp_ctx; + + let peers = self.per_peer_state.read().unwrap() + .iter() + .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap())) + .filter(|(_, peer)| peer.is_connected) .filter(|(_, peer)| peer.latest_features.supports_onion_messages()) .map(|(node_id, peer)| ForwardNode { node_id: *node_id, @@ -9010,8 +9141,8 @@ where .collect::>(); self.router - .create_blinded_paths(recipient, peers, secp_ctx) - .and_then(|paths| paths.into_iter().next().ok_or(())) + .create_compact_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx) + .and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(())) } /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to @@ -9404,7 +9535,38 @@ where self, || -> NotifyOption { NotifyOption::DoPersist }); *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))); + let mut min_anchor_feerate = None; + let mut min_non_anchor_feerate = None; + if self.background_events_processed_since_startup.load(Ordering::Relaxed) { + // If we're past the startup phase, update our feerate cache + let mut last_days_feerates = self.last_days_feerates.lock().unwrap(); + if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS { + last_days_feerates.pop_front(); + } + let anchor_feerate = self.fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee); + let non_anchor_feerate = self.fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee); + last_days_feerates.push_back((anchor_feerate, non_anchor_feerate)); + if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS { + min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied(); + min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied(); + } + } + + self.do_chain_event(Some(height), |channel| { + let logger = WithChannelContext::from(&self.logger, &channel.context, None); + if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + if let Some(feerate) = min_anchor_feerate { + channel.check_for_stale_feerate(&logger, feerate)?; + } + } else { + if let Some(feerate) = min_non_anchor_feerate { + channel.check_for_stale_feerate(&logger, feerate)?; + } + } + channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)) + }); macro_rules! 
max_time { ($timestamp: expr) => { @@ -9616,6 +9778,7 @@ where htlc_id: htlc.prev_htlc_id, incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, phantom_shared_secret: None, + counterparty_node_id: htlc.prev_counterparty_node_id, outpoint: htlc.prev_funding_outpoint, channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), @@ -9801,7 +9964,7 @@ where } #[cfg(splicing)] - fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) { + fn handle_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported".to_owned(), msg.channel_id.clone())), *counterparty_node_id); @@ -10008,7 +10171,7 @@ where // Quiescence &events::MessageSendEvent::SendStfu { .. } => false, // Splicing - &events::MessageSendEvent::SendSplice { .. } => false, + &events::MessageSendEvent::SendSpliceInit { .. } => false, &events::MessageSendEvent::SendSpliceAck { .. } => false, &events::MessageSendEvent::SendSpliceLocked { .. } => false, // Interactive Transaction Construction @@ -10360,10 +10523,17 @@ where R::Target: Router, L::Target: Logger, { - fn handle_message(&self, message: OffersMessage, responder: Option) -> ResponseInstruction { + fn handle_message(&self, message: OffersMessage, context: OffersContext, responder: Option) -> ResponseInstruction { let secp_ctx = &self.secp_ctx; let expanded_key = &self.inbound_payment_key; + let abandon_if_payment = |context| { + match context { + OffersContext::OutboundPayment { payment_id } => self.abandon_payment(payment_id), + _ => {}, + } + }; + match message { OffersMessage::InvoiceRequest(invoice_request) => { let responder = match responder { @@ -10450,42 +10620,66 @@ where }; match response { - Ok(invoice) => return responder.respond(OffersMessage::Invoice(invoice)), - Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())), + Ok(invoice) => responder.respond(OffersMessage::Invoice(invoice)), + Err(error) => responder.respond(OffersMessage::InvoiceError(error.into())), } }, OffersMessage::Invoice(invoice) => { - let response = invoice - .verify(expanded_key, secp_ctx) - .map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned())) - .and_then(|payment_id| { + let result = match invoice.verify(expanded_key, secp_ctx) { + Ok(payment_id) => { let features = self.bolt12_invoice_features(); if invoice.invoice_features().requires_unknown_bits_from(&features) { Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)) + } else if self.default_configuration.manually_handle_bolt12_invoices { + let event = Event::InvoiceReceived { payment_id, invoice, responder }; + self.pending_events.lock().unwrap().push_back((event, None)); + return ResponseInstruction::NoResponse; } else { - self.send_payment_for_bolt12_invoice(&invoice, payment_id) + self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id) .map_err(|e| { log_trace!(self.logger, "Failed paying invoice: {:?}", e); InvoiceError::from_string(format!("{:?}", e)) }) } - }); + }, + Err(()) => Err(InvoiceError::from_string("Unrecognized invoice".to_owned())), + }; - match (responder, response) { - (Some(responder), Err(e)) => responder.respond(OffersMessage::InvoiceError(e)), - (None, Err(_)) => { - log_trace!( - self.logger, - "A response was generated, but there is no reply_path specified for sending the response." 
- ); - return ResponseInstruction::NoResponse; - } - _ => return ResponseInstruction::NoResponse, + match result { + Ok(_) => ResponseInstruction::NoResponse, + Err(err) => match responder { + Some(responder) => { + abandon_if_payment(context); + responder.respond(OffersMessage::InvoiceError(err)) + }, + None => { + abandon_if_payment(context); + log_trace!( + self.logger, + "An error response was generated, but there is no reply_path specified \ + for sending the response. Error: {}", + err + ); + return ResponseInstruction::NoResponse; + }, + }, + } + }, + #[cfg(async_payments)] + OffersMessage::StaticInvoice(_invoice) => { + match responder { + Some(responder) => { + responder.respond(OffersMessage::InvoiceError( + InvoiceError::from_string("Static invoices not yet supported".to_string()) + )) + }, + None => return ResponseInstruction::NoResponse, } }, OffersMessage::InvoiceError(invoice_error) => { + abandon_if_payment(context); log_trace!(self.logger, "Received invoice_error: {}", invoice_error); - return ResponseInstruction::NoResponse; + ResponseInstruction::NoResponse }, } } @@ -10495,6 +10689,31 @@ where } } +impl +AsyncPaymentsMessageHandler for ChannelManager +where + M::Target: chain::Watch<::EcdsaSigner>, + T::Target: BroadcasterInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + R::Target: Router, + L::Target: Logger, +{ + fn held_htlc_available( + &self, _message: HeldHtlcAvailable, _responder: Option + ) -> ResponseInstruction { + ResponseInstruction::NoResponse + } + + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} + + fn release_pending_messages(&self) -> Vec> { + Vec::new() + } +} + impl NodeIdLookUp for ChannelManager where @@ -10576,140 +10795,6 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; -impl_writeable_tlv_based!(CounterpartyForwardingInfo, { - (2, fee_base_msat, required), - (4, fee_proportional_millionths, required), - (6, cltv_expiry_delta, required), -}); - -impl_writeable_tlv_based!(ChannelCounterparty, { - (2, node_id, required), - (4, features, required), - (6, unspendable_punishment_reserve, required), - (8, forwarding_info, option), - (9, outbound_htlc_minimum_msat, option), - (11, outbound_htlc_maximum_msat, option), -}); - -impl Writeable for ChannelDetails { - fn write(&self, writer: &mut W) -> Result<(), io::Error> { - // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with - // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. 
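The compatibility note above is worth spelling out: the (now relocated) serialization writes `user_channel_id` as a required low u64 that pre-0.0.113 readers understand plus an optional high u64, and the read side recombines them. A sketch of the split and join:

// Sketch of the u128 compatibility split described above.
fn split_user_channel_id(id: u128) -> (u64, Option<u64>) {
    (id as u64, Some((id >> 64) as u64))
}

fn join_user_channel_id(low: u64, high: Option<u64>) -> u128 {
    low as u128 + ((high.unwrap_or(0) as u128) << 64)
}

fn main() {
    let id: u128 = (42u128 << 64) | 7;
    let (low, high) = split_user_channel_id(id);
    assert_eq!(join_user_channel_id(low, high), id);
    // A reader that never sees the optional high half still recovers the
    // low 64 bits, matching pre-0.0.113 behavior.
    assert_eq!(join_user_channel_id(low, None), 7);
}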
- let user_channel_id_low = self.user_channel_id as u64; - let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64); - write_tlv_fields!(writer, { - (1, self.inbound_scid_alias, option), - (2, self.channel_id, required), - (3, self.channel_type, option), - (4, self.counterparty, required), - (5, self.outbound_scid_alias, option), - (6, self.funding_txo, option), - (7, self.config, option), - (8, self.short_channel_id, option), - (9, self.confirmations, option), - (10, self.channel_value_satoshis, required), - (12, self.unspendable_punishment_reserve, option), - (14, user_channel_id_low, required), - (16, self.balance_msat, required), - (18, self.outbound_capacity_msat, required), - (19, self.next_outbound_htlc_limit_msat, required), - (20, self.inbound_capacity_msat, required), - (21, self.next_outbound_htlc_minimum_msat, required), - (22, self.confirmations_required, option), - (24, self.force_close_spend_delay, option), - (26, self.is_outbound, required), - (28, self.is_channel_ready, required), - (30, self.is_usable, required), - (32, self.is_public, required), - (33, self.inbound_htlc_minimum_msat, option), - (35, self.inbound_htlc_maximum_msat, option), - (37, user_channel_id_high_opt, option), - (39, self.feerate_sat_per_1000_weight, option), - (41, self.channel_shutdown_state, option), - (43, self.pending_inbound_htlcs, optional_vec), - (45, self.pending_outbound_htlcs, optional_vec), - }); - Ok(()) - } -} - -impl Readable for ChannelDetails { - fn read(reader: &mut R) -> Result { - _init_and_read_len_prefixed_tlv_fields!(reader, { - (1, inbound_scid_alias, option), - (2, channel_id, required), - (3, channel_type, option), - (4, counterparty, required), - (5, outbound_scid_alias, option), - (6, funding_txo, option), - (7, config, option), - (8, short_channel_id, option), - (9, confirmations, option), - (10, channel_value_satoshis, required), - (12, unspendable_punishment_reserve, option), - (14, user_channel_id_low, required), - (16, balance_msat, required), - (18, outbound_capacity_msat, required), - // Note that by the time we get past the required read above, outbound_capacity_msat will be - // filled in, so we can safely unwrap it here. - (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), - (20, inbound_capacity_msat, required), - (21, next_outbound_htlc_minimum_msat, (default_value, 0)), - (22, confirmations_required, option), - (24, force_close_spend_delay, option), - (26, is_outbound, required), - (28, is_channel_ready, required), - (30, is_usable, required), - (32, is_public, required), - (33, inbound_htlc_minimum_msat, option), - (35, inbound_htlc_maximum_msat, option), - (37, user_channel_id_high_opt, option), - (39, feerate_sat_per_1000_weight, option), - (41, channel_shutdown_state, option), - (43, pending_inbound_htlcs, optional_vec), - (45, pending_outbound_htlcs, optional_vec), - }); - - // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with - // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. 
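The Readable impl above also shows the `default_value` TLV trick: when an optional field written only by newer versions (here `next_outbound_htlc_limit_msat`, type 19) is absent, it is derived from a field that has already been read. A reduced sketch of that fallback:

// Sketch of the default_value pattern above; struct is a stand-in for the
// partially-read TLV state.
struct PartialRead {
    outbound_capacity_msat: u64,
    next_outbound_htlc_limit_msat: Option<u64>,
}

fn finish_read(partial: PartialRead) -> (u64, u64) {
    let limit = partial.next_outbound_htlc_limit_msat
        .unwrap_or(partial.outbound_capacity_msat);
    (partial.outbound_capacity_msat, limit)
}

fn main() {
    // Old writer omitted the field: default to the outbound capacity.
    assert_eq!(finish_read(PartialRead { outbound_capacity_msat: 9_000, next_outbound_htlc_limit_msat: None }), (9_000, 9_000));
    // New writer set it explicitly: the written value wins.
    assert_eq!(finish_read(PartialRead { outbound_capacity_msat: 9_000, next_outbound_htlc_limit_msat: Some(4_000) }), (9_000, 4_000));
}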
- let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
- let user_channel_id = user_channel_id_low as u128 +
- ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
-
- Ok(Self {
- inbound_scid_alias,
- channel_id: channel_id.0.unwrap(),
- channel_type,
- counterparty: counterparty.0.unwrap(),
- outbound_scid_alias,
- funding_txo,
- config,
- short_channel_id,
- channel_value_satoshis: channel_value_satoshis.0.unwrap(),
- unspendable_punishment_reserve,
- user_channel_id,
- balance_msat: balance_msat.0.unwrap(),
- outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
- next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
- next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
- inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
- confirmations_required,
- confirmations,
- force_close_spend_delay,
- is_outbound: is_outbound.0.unwrap(),
- is_channel_ready: is_channel_ready.0.unwrap(),
- is_usable: is_usable.0.unwrap(),
- is_public: is_public.0.unwrap(),
- inbound_htlc_minimum_msat,
- inbound_htlc_maximum_msat,
- feerate_sat_per_1000_weight,
- channel_shutdown_state,
- pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
- pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
- })
- }
-}
-
 impl_writeable_tlv_based!(PhantomRouteHints, {
 (2, channels, required_vec),
 (4, phantom_scid, required),
@@ -10744,7 +10829,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 (4, payment_data, option), // Added in 0.0.116
 (5, custom_tlvs, optional_vec),
 },
-;);
+);

 impl_writeable_tlv_based!(PendingHTLCInfo, {
 (0, routing, required),
@@ -10824,14 +10909,14 @@ impl Readable for HTLCFailureMsg {
 }
 }

-impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
+impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
 (0, Forward),
 (1, Fail),
 );

 impl_writeable_tlv_based_enum!(BlindedFailure,
 (0, FromIntroductionNode) => {},
- (2, FromBlindedNode) => {}, ;
+ (2, FromBlindedNode) => {},
 );

 impl_writeable_tlv_based!(HTLCPreviousHopData, {
@@ -10845,6 +10930,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 // Note that by the time we get past the required read for type 2 above, outpoint will be
 // filled in, so we can safely unwrap it here.
 (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
+ (11, counterparty_node_id, option),
 });

 impl Writeable for ClaimableHTLC {
@@ -11001,6 +11087,7 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
 // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
 // filled in, so we can safely unwrap it here.
 (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
+ (9, prev_counterparty_node_id, option),
 });

 impl Writeable for HTLCForwardInfo {
@@ -11344,14 +11431,6 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 }
 }

-impl_writeable_tlv_based_enum!(ChannelShutdownState,
- (0, NotShuttingDown) => {},
- (2, ShutdownInitiated) => {},
- (4, ResolvingHTLCs) => {},
- (6, NegotiatingClosingFee) => {},
- (8, ShutdownComplete) => {}, ;
-);
-
 /// Arguments for the creation of a ChannelManager that are not deserialized.
/// /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation @@ -12273,6 +12352,7 @@ where amount_msat: claimable_amt_msat, htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(), sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat), + onion_fields: payment.onion_fields, }, None)); } } @@ -12285,7 +12365,12 @@ where for action in actions.iter() { if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: - Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), .. + Some(EventUnblockedChannel { + counterparty_node_id: blocked_node_id, + funding_txo: _, + channel_id: blocked_channel_id, + blocking_action, + }), .. } = action { if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) { log_trace!(logger, @@ -12364,6 +12449,8 @@ where node_signer: args.node_signer, signer_provider: args.signer_provider, + last_days_feerates: Mutex::new(VecDeque::new()), + logger: args.logger, default_configuration: args.default_config, }; @@ -12470,8 +12557,8 @@ mod tests { // update message and would always update the local fee info, even if our peer was // (spuriously) forwarding us our own channel_update. let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..]; - let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 }; - let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 }; + let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 }; + let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 }; // First deliver each peers' own message, checking that the node doesn't need to be // persisted and that its channel info remains the same. @@ -12901,7 +12988,7 @@ mod tests { nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap(); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); // Confirm that the channel_update was not sent immediately to node[1] but was cached. 
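The test expectations above pin down the richer closure reason this change introduces: `HolderForceClosed` now carries whether the latest commitment transaction was broadcast. A sketch of consuming it in an event handler follows; the handler shape is invented for illustration, and the `None` case presumably covers data serialized before the field existed.

    match reason {
        ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } => {
            // We force-closed and broadcast our latest commitment transaction.
        },
        ClosureReason::HolderForceClosed { .. } => {
            // Force-closed without a (known) broadcast; the flag may be `None`.
        },
        _ => {},
    }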
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -12956,11 +13043,11 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - - nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); + let error_message = "Channel force-closed"; + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); { // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been @@ -13174,6 +13261,7 @@ mod tests { let channel_id = ChannelId::from_bytes([4; 32]); let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap()); let intercept_id = InterceptId([0; 32]); + let error_message = "Channel force-closed"; // Test the API functions. check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key); @@ -13182,9 +13270,9 @@ mod tests { check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key); - check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key); + check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key); - check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key); + check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key); check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key); @@ -13206,15 +13294,16 @@ mod tests { // Dummy values let channel_id = ChannelId::from_bytes([4; 32]); + let error_message = "Channel force-closed"; // Test the API functions. 
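As the updated calls above show, both force-close entry points now take a `String` that is relayed to the counterparty in the error message. A minimal call sketch, where `channel_manager`, `channel_id`, and `counterparty_node_id` are stand-ins rather than names from this diff:

    let err_msg = "Channel force-closed".to_string();
    channel_manager
        .force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, err_msg)
        .unwrap();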
check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42)); check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); - check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id); - check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id); check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id); @@ -13568,6 +13657,7 @@ mod tests { anchors_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let error_message = "Channel force-closed"; nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap(); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); @@ -13577,7 +13667,7 @@ mod tests { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); } _ => panic!("Unexpected event"), } @@ -13685,15 +13775,16 @@ mod tests { let user_config = test_default_channel_config(); let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfg, &node_chanmgr); + let error_message = "Channel force-closed"; // Open a channel, immediately disconnect each other, and broadcast Alice's latest state. 
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); { let txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -13811,10 +13902,12 @@ pub mod bench { use crate::util::test_utils; use crate::util::config::{UserConfig, MaxDustHTLCExposure}; + use bitcoin::amount::Amount; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::{Transaction, TxOut}; + use bitcoin::transaction::Version; use crate::sync::{Arc, Mutex, RwLock}; @@ -13891,8 +13984,8 @@ pub mod bench { let tx; if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) { - tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: 8_000_000, script_pubkey: output_script, + tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: Amount::from_sat(8_000_000), script_pubkey: output_script, }]}; node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap(); } else { panic!(); }
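The bench hunk above tracks rust-bitcoin's move to strongly-typed transaction fields. A minimal sketch of the new construction, assuming the imports shown in the diff; the empty `ScriptBuf` is a stand-in for the bench's `output_script`:

    use bitcoin::amount::Amount;
    use bitcoin::blockdata::locktime::absolute::LockTime;
    use bitcoin::transaction::Version;
    use bitcoin::{ScriptBuf, Transaction, TxOut};

    // Previously `version: 2` and `value: 8_000_000` were bare integers.
    let tx = Transaction {
        version: Version::TWO,
        lock_time: LockTime::ZERO,
        input: Vec::new(),
        output: vec![TxOut {
            value: Amount::from_sat(8_000_000),
            script_pubkey: ScriptBuf::new(), // stand-in for the event's output_script
        }],
    };
    assert_eq!(tx.output[0].value.to_sat(), 8_000_000);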