use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::key::constants::SECRET_KEY_SIZE;
-use bitcoin::network::constants::Network;
+use bitcoin::network::Network;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{secp256k1, Sequence};
-use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
+use crate::blinded_path::{BlindedPath, NodeIdLookUp};
+use crate::blinded_path::message::ForwardNode;
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
-use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
+use crate::ln::inbound_payment;
+use crate::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
-pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
+use crate::ln::channel_state::ChannelDetails;
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
use crate::ln::wire::Encode;
-use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
+use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
use crate::offers::invoice_error::InvoiceError;
-use crate::offers::merkle::SignError;
-use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
+use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
+use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
-use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
+use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::errors::APIError;
+
#[cfg(not(c_bindings))]
use {
+ crate::offers::offer::DerivedMetadata,
crate::routing::router::DefaultRouter,
crate::routing::gossip::NetworkGraph,
crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
crate::sign::KeysManager,
};
+#[cfg(c_bindings)]
+use {
+ crate::offers::offer::OfferWithDerivedMetadataBuilder,
+ crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
+};
use alloc::collections::{btree_map, BTreeMap};
/// [`Event::PaymentClaimable::onion_fields`] as
/// [`RecipientOnionFields::payment_metadata`].
payment_metadata: Option<Vec<u8>>,
+ /// The context of the payment included by the recipient in a blinded path, or `None` if a
+ /// blinded path was not used.
+ ///
+ /// Used in part to determine the [`events::PaymentPurpose`].
+ payment_context: Option<PaymentContext>,
/// CLTV expiry of the received HTLC.
///
/// Used to track when we should expire pending HTLCs that go unclaimed.
/// For HTLCs received by LDK, these will ultimately bubble back up as
/// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
}
match self {
Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
_ => None,
}
}
receiver_node_id: PublicKey,
htlcs: Vec<events::ClaimedHTLC>,
sender_intended_value: Option<u64>,
+ onion_fields: Option<RecipientOnionFields>,
}
impl_writeable_tlv_based!(ClaimingPayment, {
(0, amount_msat, required),
(4, receiver_node_id, required),
(5, htlcs, optional_vec),
(7, sender_intended_value, option),
+ (9, onion_fields, option),
});
struct ClaimablePayment {
/// The peer is currently connected (i.e. we've seen a
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
/// [`ChannelMessageHandler::peer_disconnected`]).
- is_connected: bool,
+ pub is_connected: bool,
}
impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
return false
}
!self.channel_by_id.iter().any(|(_, phase)|
- matches!(phase, ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_))
+ match phase {
+ ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
+ ChannelPhase::UnfundedInboundV1(_) => false,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) => true,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(_) => false,
+ }
)
&& self.monitor_update_blocked_actions.is_empty()
&& self.in_flight_monitor_updates.is_empty()
type NodeSigner: NodeSigner + ?Sized;
/// A type that may be dereferenced to [`Self::NodeSigner`].
type NS: Deref<Target = Self::NodeSigner>;
- /// A type implementing [`WriteableEcdsaChannelSigner`].
- type Signer: WriteableEcdsaChannelSigner + Sized;
+ /// A type implementing [`EcdsaChannelSigner`].
+ type Signer: EcdsaChannelSigner + Sized;
/// A type implementing [`SignerProvider`] for [`Self::Signer`].
type SignerProvider: SignerProvider<EcdsaSigner = Self::Signer> + ?Sized;
/// A type that may be dereferenced to [`Self::SignerProvider`].
fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
}
-/// Manager which keeps track of a number of channels and sends messages to the appropriate
-/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+/// A lightning node's channel state machine and payment management logic, which facilitates
+/// sending, forwarding, and receiving payments through lightning channels.
+///
+/// [`ChannelManager`] is parameterized by a number of components to achieve this.
+/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
+/// channel
+/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
+/// closing channels
+/// - [`EntropySource`] for providing random data needed for cryptographic operations
+/// - [`NodeSigner`] for cryptographic operations scoped to the node
+/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
+/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
+/// timely manner
+/// - [`Router`] for finding payment paths when initiating and retrying payments
+/// - [`Logger`] for logging operational information of varying degrees
+///
+/// Additionally, it implements the following traits:
+/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
+/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
+/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
+/// - [`EventsProvider`] to generate user-actionable [`Event`]s
+/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
+///
+/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
+/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
+///
+/// # `ChannelManager` vs `ChannelMonitor`
+///
+/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
+/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
+/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
+/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
+/// [`chain::Watch`] of them.
+///
+/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
+/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
+/// for any pertinent on-chain activity, enforcing claims as needed.
+///
+/// This division of off-chain management and on-chain enforcement allows for interesting node
+/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
+/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
+///
+/// # Initialization
+///
+/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
+/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
+/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
+/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
+/// detailed in the [`ChannelManagerReadArgs`] documentation.
+///
+/// ```
+/// use bitcoin::BlockHash;
+/// use bitcoin::network::Network;
+/// use lightning::chain::BestBlock;
+/// # use lightning::chain::channelmonitor::ChannelMonitor;
+/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
+/// # use lightning::routing::gossip::NetworkGraph;
+/// use lightning::util::config::UserConfig;
+/// use lightning::util::ser::ReadableArgs;
+///
+/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
+/// # fn example<
+/// # 'a,
+/// # L: lightning::util::logger::Logger,
+/// # ES: lightning::sign::EntropySource,
+/// # S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
+/// # SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
+/// # SP: Sized,
+/// # R: lightning::io::Read,
+/// # >(
+/// # fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
+/// # chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
+/// # tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
+/// # router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
+/// # logger: &L,
+/// # entropy_source: &ES,
+/// # node_signer: &dyn lightning::sign::NodeSigner,
+/// # signer_provider: &lightning::sign::DynSignerProvider,
+/// # best_block: lightning::chain::BestBlock,
+/// # current_timestamp: u32,
+/// # mut reader: R,
+/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
+/// // Fresh start with no channels
+/// let params = ChainParameters {
+/// network: Network::Bitcoin,
+/// best_block,
+/// };
+/// let default_config = UserConfig::default();
+/// let channel_manager = ChannelManager::new(
+/// fee_estimator, chain_monitor, tx_broadcaster, router, logger, entropy_source, node_signer,
+/// signer_provider, default_config, params, current_timestamp
+/// );
+///
+/// // Restart from deserialized data
+/// let mut channel_monitors = read_channel_monitors();
+/// let args = ChannelManagerReadArgs::new(
+/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
+/// router, logger, default_config, channel_monitors.iter_mut().collect()
+/// );
+/// let (block_hash, channel_manager) =
+/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
+///
+/// // Update the ChannelManager and ChannelMonitors with the latest chain data
+/// // ...
+///
+/// // Move the monitors to the ChannelManager's chain::Watch parameter
+/// for monitor in channel_monitors {
+/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+/// }
+/// # Ok(())
+/// # }
+/// ```
+///
+/// # Operation
+///
+/// The following is required for [`ChannelManager`] to function properly:
+/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
+/// called by [`PeerManager::read_event`] when processing network I/O)
+/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
+/// (typically initiated when [`PeerManager::process_events`] is called)
+/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
+/// as documented by those traits
+/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
+/// every minute
+/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
+/// [`Persister`] such as a [`KVStore`] implementation
+/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
+///
+/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
+/// when the last two requirements need to be checked.
+///
+/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
+/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
+/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
+/// crate. For languages other than Rust, the availability of similar utilities may vary.
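+///
+/// For example, these requirements might be driven from a simple polling loop as sketched below.
+/// This is illustrative only; the [`lightning-background-processor`] crate normally handles these
+/// duties for you.
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Roughly once per minute, perform periodic channel and payment checks
+/// channel_manager.timer_tick_occurred();
+///
+/// // After processing network I/O, handle any generated events
+/// channel_manager.process_pending_events(&|event| match event {
+/// // ...
+/// # _ => {},
+/// });
+///
+/// // Persist whenever the manager indicates its state has changed
+/// if channel_manager.get_and_clear_needs_persistence() {
+/// // Write channel_manager.encode() to durable storage, e.g., via a Persister
+/// }
+/// # }
+/// ```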
+///
+/// # Channels
+///
+/// [`ChannelManager`]'s primary function involves managing channel state. Without channels,
+/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
+/// currently open channels.
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let channels = channel_manager.list_usable_channels();
+/// for details in channels {
+/// println!("{:?}", details);
+/// }
+/// # }
+/// ```
+///
+/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
+/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
+/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
+/// by [`ChannelManager`].
+///
+/// ## Opening Channels
+///
+/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
+/// opening an outbound channel, which requires constructing a funding transaction when handling
+/// [`Event::FundingGenerationReady`].
+///
+/// ```
+/// # use bitcoin::{ScriptBuf, Transaction};
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # trait Wallet {
+/// # fn create_funding_transaction(
+/// # &self, _amount_sats: u64, _output_script: ScriptBuf
+/// # ) -> Transaction;
+/// # }
+/// #
+/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let value_sats = 1_000_000;
+/// let push_msats = 10_000_000;
+/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
+/// Ok(channel_id) => println!("Opening channel {}", channel_id),
+/// Err(e) => println!("Error opening channel: {:?}", e),
+/// }
+///
+/// // On the event processing thread once the peer has responded
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::FundingGenerationReady {
+/// temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
+/// user_channel_id, ..
+/// } => {
+/// assert_eq!(user_channel_id, 42);
+/// let funding_transaction = wallet.create_funding_transaction(
+/// channel_value_satoshis, output_script
+/// );
+/// match channel_manager.funding_transaction_generated(
+/// &temporary_channel_id, &counterparty_node_id, funding_transaction
+/// ) {
+/// Ok(()) => println!("Funding channel {}", temporary_channel_id),
+/// Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
+/// }
+/// },
+/// Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
+/// assert_eq!(user_channel_id, 42);
+/// println!(
+/// "Channel {} now {} pending (funding transaction has been broadcasted)", channel_id,
+/// former_temporary_channel_id.unwrap()
+/// );
+/// },
+/// Event::ChannelReady { channel_id, user_channel_id, .. } => {
+/// assert_eq!(user_channel_id, 42);
+/// println!("Channel {} ready", channel_id);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Accepting Channels
+///
+/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
+/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
+/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
+/// # // ...
+/// # unimplemented!()
+/// # }
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let error_message = "Channel force-closed";
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
+/// if !is_trusted(counterparty_node_id) {
+/// match channel_manager.force_close_without_broadcasting_txn(
+/// &temporary_channel_id, &counterparty_node_id, error_message.to_string()
+/// ) {
+/// Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
+/// Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
+/// }
+/// return;
+/// }
+///
+/// let user_channel_id = 43;
+/// match channel_manager.accept_inbound_channel(
+/// &temporary_channel_id, &counterparty_node_id, user_channel_id
+/// ) {
+/// Ok(()) => println!("Accepting channel {}", temporary_channel_id),
+/// Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
+/// }
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Closing Channels
+///
+/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
+/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
+/// lower fees and immediate access to funds. However, the latter may be necessary if the
+/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
+/// once the channel has been closed successfully.
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::types::ChannelId;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
+/// Ok(()) => println!("Closing channel {}", channel_id),
+/// Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
+/// }
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::ChannelClosed { channel_id, user_channel_id, .. } => {
+/// assert_eq!(user_channel_id, 42);
+/// println!("Channel {} closed", channel_id);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Payments
+///
+/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
+/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
+/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
+/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
+/// from the [`PaymentSecret`]. Sending payments, however, requires tracking in order to retry
+/// failed HTLCs.
+///
+/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
+/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
+/// for a payment will be retried according to the payment's [`Retry`] strategy or until
+/// [`abandon_payment`] is called.
+///
+/// ## BOLT 11 Invoices
+///
+/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. Specifically, use the
+/// functions in its `utils` module for constructing invoices that are compatible with
+/// [`ChannelManager`]. These functions serve as a convenience for building invoices with the
+/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
+/// own [`PaymentHash`], use [`create_inbound_payment_for_hash`] or the corresponding functions in
+/// the [`lightning-invoice`] `utils` module.
///
-/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
-/// to individual Channels.
+/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
+/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
+/// an [`Event::PaymentClaimed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Or use utils::create_invoice_from_channelmanager
+/// let known_payment_hash = match channel_manager.create_inbound_payment(
+/// Some(10_000_000), 3600, None
+/// ) {
+/// Ok((payment_hash, _payment_secret)) => {
+/// println!("Creating inbound payment {}", payment_hash);
+/// payment_hash
+/// },
+/// Err(()) => panic!("Error creating inbound payment"),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+/// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claiming payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
+/// println!("Unknown payment hash: {}", payment_hash);
+/// },
+/// PaymentPurpose::SpontaneousPayment(payment_preimage) => {
+/// assert_ne!(payment_hash, known_payment_hash);
+/// println!("Claiming spontaneous payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// // ...
+/// # _ => {},
+/// },
+/// Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claimed {} msats", amount_msat);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// For paying an invoice, [`lightning-invoice`] provides a `payment` module with convenience
+/// functions for use with [`send_payment`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::types::PaymentHash;
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
+/// # use lightning::routing::router::RouteParameters;
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+/// # route_params: RouteParameters, retry: Retry
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // let (payment_hash, recipient_onion, route_params) =
+/// // payment::payment_parameters_from_invoice(&invoice);
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.send_payment(
+/// payment_hash, recipient_onion, payment_id, route_params, retry
+/// ) {
+/// Ok(()) => println!("Sending payment with hash {}", payment_hash),
+/// Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
+/// }
+///
+/// let expected_payment_id = payment_id;
+/// let expected_payment_hash = payment_hash;
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::Pending { payment_id, payment_hash, .. }
+/// if *payment_id == expected_payment_id && *payment_hash == expected_payment_hash
+/// )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
+/// Event::PaymentFailed { payment_hash, .. } => println!("Failed paying {}", payment_hash),
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Offers
+///
+/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
+/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
+/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
+/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
+/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
+/// stateless just as BOLT 11 invoices are.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let absolute_expiry = None;
+/// let offer = channel_manager
+/// .create_offer_builder(absolute_expiry)?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
+/// # let offer = builder
+/// .description("coffee".to_string())
+/// .amount_msats(10_000_000)
+/// .build()?;
+/// let bech32_offer = offer.to_string();
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+/// PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
+/// println!("Claiming payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
+/// println!("Unknown payment hash: {}", payment_hash);
+/// },
+/// // ...
+/// # _ => {},
+/// },
+/// Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+/// println!("Claimed {} msats", amount_msat);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
+/// and pays the [`Bolt12Invoice`] response. In addition to success and failure events,
+/// [`ChannelManager`] may also generate an [`Event::InvoiceRequestFailed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::offer::Offer;
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
+/// # payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.pay_for_offer(
+/// offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
+/// ) {
+/// Ok(()) => println!("Requesting invoice for offer"),
+/// Err(e) => println!("Unable to request invoice for offer: {:?}", e),
+/// }
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::AwaitingInvoice { payment_id } if *payment_id == expected_payment_id
+/// )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::Pending { payment_id, .. } if *payment_id == expected_payment_id
+/// )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+/// Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+/// Event::InvoiceRequestFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Refunds
+///
+/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
+/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
+/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
+/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
+///
+/// ```
+/// # use core::time::Duration;
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
+/// # max_total_routing_fee_msat: Option<u64>
+/// # ) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// let refund = channel_manager
+/// .create_refund_builder(
+/// amount_msats, absolute_expiry, payment_id, retry, max_total_routing_fee_msat
+/// )?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
+/// # let refund = builder
+/// .description("coffee".to_string())
+/// .payer_note("refund for order 1234".to_string())
+/// .build()?;
+/// let bech32_refund = refund.to_string();
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::AwaitingInvoice { payment_id } if *payment_id == expected_payment_id
+/// )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::Pending { payment_id, .. } if *payment_id == expected_payment_id
+/// )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+/// Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+/// // ...
+/// # _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
+/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::refund::Refund;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
+/// Ok(invoice) => {
+/// let payment_hash = invoice.payment_hash();
+/// println!("Requesting refund payment {}", payment_hash);
+/// payment_hash
+/// },
+/// Err(e) => panic!("Unable to request payment for refund: {:?}", e),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+/// PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claiming payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
+/// println!("Unknown payment hash: {}", payment_hash);
+/// },
+/// // ...
+/// # _ => {},
+/// },
+/// Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claimed {} msats", amount_msat);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Persistence
///
/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
/// serialized/deserialized). Deserializing via [`ChannelManagerReadArgs`] also tells you the last
/// block hash which was connected. You should get the best block tip before using the manager.
/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
///
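+/// For instance, the serialized state might be written to a [`KVStore`] under the well-known key
+/// from [`crate::util::persist`], as sketched below. In practice, the [`Persister`] utilities used
+/// by [`lightning-background-processor`] handle this for you.
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY};
+/// # use lightning::util::persist::{CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE};
+/// # use lightning::util::ser::Writeable;
+/// #
+/// # fn example<T: AChannelManager, S: KVStore>(channel_manager: T, store: S) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Serialize the ChannelManager and write it out under the well-known persistence key
+/// let encoded = channel_manager.encode();
+/// store.write(
+/// CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+/// CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+/// CHANNEL_MANAGER_PERSISTENCE_KEY,
+/// &encoded,
+/// ).expect("Failed to persist ChannelManager");
+/// # }
+/// ```
+///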
+/// # `ChannelUpdate` Messages
+///
/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
///
+/// # DoS Mitigation
+///
/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
/// inbound channels without confirmed funding transactions. This may result in nodes which we do
/// not have a channel with being unable to connect to us or open new channels with us if we have
/// many peers with unfunded channels.
///
/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
/// never limited. Please ensure you limit the count of such channels yourself.
///
+/// # Type Aliases
+///
/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
+/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
+/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
+/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
+/// [`Persister`]: crate::util::persist::Persister
+/// [`KVStore`]: crate::util::persist::KVStore
+/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
+/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
+/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
+/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
+/// [`list_channels`]: Self::list_channels
+/// [`list_usable_channels`]: Self::list_usable_channels
+/// [`create_channel`]: Self::create_channel
+/// [`close_channel`]: Self::close_channel
+/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
+/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
+/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md
+/// [`list_recent_payments`]: Self::list_recent_payments
+/// [`abandon_payment`]: Self::abandon_payment
+/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
+/// [`create_inbound_payment`]: Self::create_inbound_payment
+/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+/// [`claim_funds`]: Self::claim_funds
+/// [`send_payment`]: Self::send_payment
+/// [`offers`]: crate::offers
+/// [`create_offer_builder`]: Self::create_offer_builder
+/// [`pay_for_offer`]: Self::pay_for_offer
+/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+/// [`create_refund_builder`]: Self::create_refund_builder
+/// [`request_refund_payment`]: Self::request_refund_payment
/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
/// [`funding_created`]: msgs::FundingCreated
/// [`funding_transaction_generated`]: Self::funding_transaction_generated
/// [`BlockHash`]: bitcoin::hash_types::BlockHash
/// [`update_channel`]: chain::Watch::update_channel
/// [`ChannelUpdate`]: msgs::ChannelUpdate
-/// [`timer_tick_occurred`]: Self::timer_tick_occurred
/// [`read`]: ReadableArgs::read
//
// Lock order:
// | |
// | |__`pending_intercepted_htlcs`
// |
+// |__`decode_update_add_htlcs`
+// |
// |__`per_peer_state`
// |
// |__`pending_inbound_payments`
/// See `ChannelManager` struct-level documentation for lock order requirements.
pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
+ /// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
+ ///
+ /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+ /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+ /// and via the classic SCID.
+ ///
+ /// Note that no consistency guarantees are made about the existence of a channel with the
+ /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
+
/// The sets of payments which are claimable or currently being claimed. See
/// [`ClaimablePayments`]' individual field docs for more info.
///
pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
+ /// Tracks the message events that are to be broadcasted when we are connected to some peer.
+ pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+
entropy_source: ES,
node_signer: NS,
signer_provider: SP,
/// many peers we reject new (inbound) connections.
const MAX_NO_CHANNEL_PEERS: usize = 250;
-/// Information needed for constructing an invoice route hint for this channel.
-#[derive(Clone, Debug, PartialEq)]
-pub struct CounterpartyForwardingInfo {
- /// Base routing fee in millisatoshis.
- pub fee_base_msat: u32,
- /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
- pub fee_proportional_millionths: u32,
- /// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart,
- /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
- /// `cltv_expiry_delta` for more details.
- pub cltv_expiry_delta: u16,
-}
-
-/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
-/// to better separate parameters.
-#[derive(Clone, Debug, PartialEq)]
-pub struct ChannelCounterparty {
- /// The node_id of our counterparty
- pub node_id: PublicKey,
- /// The Features the channel counterparty provided upon last connection.
- /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
- /// many routing-relevant features are present in the init context.
- pub features: InitFeatures,
- /// The value, in satoshis, that must always be held in the channel for our counterparty. This
- /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
- /// claiming at least this value on chain.
- ///
- /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
- ///
- /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
- pub unspendable_punishment_reserve: u64,
- /// Information on the fees and requirements that the counterparty requires when forwarding
- /// payments to us through this channel.
- pub forwarding_info: Option<CounterpartyForwardingInfo>,
- /// The smallest value HTLC (in msat) the remote peer will accept, for this channel. This field
- /// is only `None` before we have received either the `OpenChannel` or `AcceptChannel` message
- /// from the remote peer, or for `ChannelCounterparty` objects serialized prior to LDK 0.0.107.
- pub outbound_htlc_minimum_msat: Option<u64>,
- /// The largest value HTLC (in msat) the remote peer currently will accept, for this channel.
- pub outbound_htlc_maximum_msat: Option<u64>,
-}
-
-/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
-#[derive(Clone, Debug, PartialEq)]
-pub struct ChannelDetails {
- /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
- /// thereafter this is the txid of the funding transaction xor the funding transaction output).
- /// Note that this means this value is *not* persistent - it can change once during the
- /// lifetime of the channel.
- pub channel_id: ChannelId,
- /// Parameters which apply to our counterparty. See individual fields for more information.
- pub counterparty: ChannelCounterparty,
- /// The Channel's funding transaction output, if we've negotiated the funding transaction with
- /// our counterparty already.
- pub funding_txo: Option<OutPoint>,
- /// The features which this channel operates with. See individual features for more info.
- ///
- /// `None` until negotiation completes and the channel type is finalized.
- pub channel_type: Option<ChannelTypeFeatures>,
- /// The position of the funding transaction in the chain. None if the funding transaction has
- /// not yet been confirmed and the channel fully opened.
- ///
- /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
- /// payments instead of this. See [`get_inbound_payment_scid`].
- ///
- /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
- /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
- ///
- /// [`inbound_scid_alias`]: Self::inbound_scid_alias
- /// [`outbound_scid_alias`]: Self::outbound_scid_alias
- /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
- /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
- /// [`confirmations_required`]: Self::confirmations_required
- pub short_channel_id: Option<u64>,
- /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
- /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
- /// the channel has not yet been confirmed (as long as [`confirmations_required`] is
- /// `Some(0)`).
- ///
- /// This will be `None` as long as the channel is not available for routing outbound payments.
- ///
- /// [`short_channel_id`]: Self::short_channel_id
- /// [`confirmations_required`]: Self::confirmations_required
- pub outbound_scid_alias: Option<u64>,
- /// An optional [`short_channel_id`] alias for this channel, randomly generated by our
- /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
- /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
- /// when they see a payment to be routed to us.
- ///
- /// Our counterparty may choose to rotate this value at any time, though will always recognize
- /// previous values for inbound payment forwarding.
- ///
- /// [`short_channel_id`]: Self::short_channel_id
- pub inbound_scid_alias: Option<u64>,
- /// The value, in satoshis, of this channel as appears in the funding output
- pub channel_value_satoshis: u64,
- /// The value, in satoshis, that must always be held in the channel for us. This value ensures
- /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
- /// this value on chain.
- ///
- /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
- ///
- /// This value will be `None` for outbound channels until the counterparty accepts the channel.
- ///
- /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
- pub unspendable_punishment_reserve: Option<u64>,
- /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects
- /// serialized with LDK versions prior to 0.0.113.
- ///
- /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
- /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- pub user_channel_id: u128,
- /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
- /// which is applied to commitment and HTLC transactions.
- ///
- /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
- pub feerate_sat_per_1000_weight: Option<u32>,
- /// Our total balance. This is the amount we would get if we close the channel.
- /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
- /// amount is not likely to be recoverable on close.
- ///
- /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
- /// balance is not available for inclusion in new outbound HTLCs). This further does not include
- /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
- /// This does not consider any on-chain fees.
- ///
- /// See also [`ChannelDetails::outbound_capacity_msat`]
- pub balance_msat: u64,
- /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
- /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
- /// available for inclusion in new outbound HTLCs). This further does not include any pending
- /// outgoing HTLCs which are awaiting some other resolution to be sent.
- ///
- /// See also [`ChannelDetails::balance_msat`]
- ///
- /// This value is not exact. Due to various in-flight changes, feerate changes, and our
- /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
- /// should be able to spend nearly this amount.
- pub outbound_capacity_msat: u64,
- /// The available outbound capacity for sending a single HTLC to the remote peer. This is
- /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
- /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
- /// to use a limit as close as possible to the HTLC limit we can currently send.
- ///
- /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
- /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
- pub next_outbound_htlc_limit_msat: u64,
- /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
- /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
- /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
- /// route which is valid.
- pub next_outbound_htlc_minimum_msat: u64,
- /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
- /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
- /// available for inclusion in new inbound HTLCs).
- /// Note that there are some corner cases not fully handled here, so the actual available
- /// inbound capacity may be slightly higher than this.
- ///
- /// This value is not exact. Due to various in-flight changes, feerate changes, and our
- /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
- /// However, our counterparty should be able to spend nearly this amount.
- pub inbound_capacity_msat: u64,
- /// The number of required confirmations on the funding transaction before the funding will be
- /// considered "locked". This number is selected by the channel fundee (i.e. us if
- /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
- /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
- /// [`ChannelHandshakeLimits::max_minimum_depth`].
- ///
- /// This value will be `None` for outbound channels until the counterparty accepts the channel.
- ///
- /// [`is_outbound`]: ChannelDetails::is_outbound
- /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
- /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
- pub confirmations_required: Option<u32>,
- /// The current number of confirmations on the funding transaction.
- ///
- /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113.
- pub confirmations: Option<u32>,
- /// The number of blocks (after our commitment transaction confirms) that we will need to wait
- /// until we can claim our funds after we force-close the channel. During this time our
- /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
- /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
- /// time to claim our non-HTLC-encumbered funds.
- ///
- /// This value will be `None` for outbound channels until the counterparty accepts the channel.
- pub force_close_spend_delay: Option<u16>,
- /// True if the channel was initiated (and thus funded) by us.
- pub is_outbound: bool,
- /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
- /// channel is not currently being shut down. `channel_ready` message exchange implies the
- /// required confirmation count has been reached (and we were connected to the peer at some
- /// point after the funding transaction received enough confirmations). The required
- /// confirmation count is provided in [`confirmations_required`].
- ///
- /// [`confirmations_required`]: ChannelDetails::confirmations_required
- pub is_channel_ready: bool,
- /// The stage of the channel's shutdown.
- /// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116.
- pub channel_shutdown_state: Option<ChannelShutdownState>,
- /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
- /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
- ///
- /// This is a strict superset of `is_channel_ready`.
- pub is_usable: bool,
- /// True if this channel is (or will be) publicly-announced.
- pub is_public: bool,
- /// The smallest value HTLC (in msat) we will accept, for this channel. This field
- /// is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.107
- pub inbound_htlc_minimum_msat: Option<u64>,
- /// The largest value HTLC (in msat) we currently will accept, for this channel.
- pub inbound_htlc_maximum_msat: Option<u64>,
- /// Set of configurable parameters that affect channel operation.
- ///
- /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
- pub config: Option<ChannelConfig>,
- /// Pending inbound HTLCs.
- ///
- /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
- pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
- /// Pending outbound HTLCs.
- ///
- /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
- pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
-}
-
-impl ChannelDetails {
- /// Gets the current SCID which should be used to identify this channel for inbound payments.
- /// This should be used for providing invoice hints or in any other context where our
- /// counterparty will forward a payment to us.
- ///
- /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the
- /// [`ChannelDetails::short_channel_id`]. See those for more information.
- pub fn get_inbound_payment_scid(&self) -> Option<u64> {
- self.inbound_scid_alias.or(self.short_channel_id)
- }
-
- /// Gets the current SCID which should be used to identify this channel for outbound payments.
- /// This should be used in [`Route`]s to describe the first hop or in other contexts where
- /// we're sending or forwarding a payment outbound over this channel.
- ///
- /// This is either the [`ChannelDetails::short_channel_id`], if set, or the
- /// [`ChannelDetails::outbound_scid_alias`]. See those for more information.
- pub fn get_outbound_payment_scid(&self) -> Option<u64> {
- self.short_channel_id.or(self.outbound_scid_alias)
- }
-
- fn from_channel_context<SP: Deref, F: Deref>(
- context: &ChannelContext<SP>, best_block_height: u32, latest_features: InitFeatures,
- fee_estimator: &LowerBoundedFeeEstimator<F>
- ) -> Self
- where
- SP::Target: SignerProvider,
- F::Target: FeeEstimator
- {
- let balance = context.get_available_balances(fee_estimator);
- let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
- context.get_holder_counterparty_selected_channel_reserve_satoshis();
- ChannelDetails {
- channel_id: context.channel_id(),
- counterparty: ChannelCounterparty {
- node_id: context.get_counterparty_node_id(),
- features: latest_features,
- unspendable_punishment_reserve: to_remote_reserve_satoshis,
- forwarding_info: context.counterparty_forwarding_info(),
- // Ensures that we have actually received the `htlc_minimum_msat` value
- // from the counterparty through the `OpenChannel` or `AcceptChannel`
- // message (as they are always the first message from the counterparty).
- // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
- // default `0` value set by `Channel::new_outbound`.
- outbound_htlc_minimum_msat: if context.have_received_message() {
- Some(context.get_counterparty_htlc_minimum_msat()) } else { None },
- outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(),
- },
- funding_txo: context.get_funding_txo(),
- // Note that accept_channel (or open_channel) is always the first message, so
- // `have_received_message` indicates that type negotiation has completed.
- channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None },
- short_channel_id: context.get_short_channel_id(),
- outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None },
- inbound_scid_alias: context.latest_inbound_scid_alias(),
- channel_value_satoshis: context.get_value_satoshis(),
- feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
- unspendable_punishment_reserve: to_self_reserve_satoshis,
- balance_msat: balance.balance_msat,
- inbound_capacity_msat: balance.inbound_capacity_msat,
- outbound_capacity_msat: balance.outbound_capacity_msat,
- next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
- next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
- user_channel_id: context.get_user_id(),
- confirmations_required: context.minimum_depth(),
- confirmations: Some(context.get_funding_tx_confirmations(best_block_height)),
- force_close_spend_delay: context.get_counterparty_selected_contest_delay(),
- is_outbound: context.is_outbound(),
- is_channel_ready: context.is_usable(),
- is_usable: context.is_live(),
- is_public: context.should_announce(),
- inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
- inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
- config: Some(context.config()),
- channel_shutdown_state: Some(context.shutdown_state()),
- pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
- pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
- }
- }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-/// Further information on the details of the channel shutdown.
-/// Upon channels being forced closed (i.e. commitment transaction confirmation detected
-/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
-/// the channel will be removed shortly.
-/// Also note, that in normal operation, peers could disconnect at any of these states
-/// and require peer re-connection before making progress onto other states
-pub enum ChannelShutdownState {
- /// Channel has not sent or received a shutdown message.
- NotShuttingDown,
- /// Local node has sent a shutdown message for this channel.
- ShutdownInitiated,
- /// Shutdown message exchanges have concluded and the channels are in the midst of
- /// resolving all existing open HTLCs before closing can continue.
- ResolvingHTLCs,
- /// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates.
- NegotiatingClosingFee,
- /// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about
- /// to drop the channel.
- ShutdownComplete,
-}
+/// The maximum expiration from the current time where an [`Offer`] or [`Refund`] is considered
+/// short-lived, while anything with a greater expiration is considered long-lived.
+///
+/// [`Offer`]s and [`Refund`]s built using [`ChannelManager::create_offer_builder`] or
+/// [`ChannelManager::create_refund_builder`], respectively, will include a [`BlindedPath`]
+/// created using:
+/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
+/// - [`MessageRouter::create_blinded_paths`] when long-lived.
+///
+/// Using compact [`BlindedPath`]s may provide better privacy as the [`MessageRouter`] could select
+/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
+/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
+pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
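+
+// A minimal sketch (illustration only, not part of LDK's API) of the expiry test the docs
+// above describe: an offer or refund whose absolute expiry falls within
+// `MAX_SHORT_LIVED_RELATIVE_EXPIRY` of the current time may safely use compact, SCID-based
+// blinded paths. The helper name and its `now` parameter are assumptions for this example.
+#[allow(dead_code)]
+fn is_short_lived_sketch(absolute_expiry: core::time::Duration, now: core::time::Duration) -> bool {
+	// Short-lived iff it expires no later than `now + MAX_SHORT_LIVED_RELATIVE_EXPIRY`.
+	absolute_expiry.saturating_sub(now) <= MAX_SHORT_LIVED_RELATIVE_EXPIRY
+}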
/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
- let mut msg_events = Vec::with_capacity(2);
+ let mut msg_event = None;
if let Some((shutdown_res, update_option)) = shutdown_finish {
let counterparty_node_id = shutdown_res.counterparty_node_id;
let channel_id = shutdown_res.channel_id;
let logger = WithContext::from(
- &$self.logger, Some(counterparty_node_id), Some(channel_id),
+ &$self.logger, Some(counterparty_node_id), Some(channel_id), None
);
log_error!(logger, "Force-closing channel: {}", err.err);
$self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
- msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
- msg_events.push(events::MessageSendEvent::HandleError {
+ msg_event = Some(events::MessageSendEvent::HandleError {
node_id: $counterparty_node_id,
action: err.action.clone()
});
}
- if !msg_events.is_empty() {
+ if let Some(msg_event) = msg_event {
let per_peer_state = $self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
- peer_state.pending_msg_events.append(&mut msg_events);
+ peer_state.pending_msg_events.push(msg_event);
}
}
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
ChannelError::Close(msg) => {
- let logger = WithChannelContext::from(&$self.logger, &$channel.context);
+ let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
let reason = ClosureReason::ProcessingError { err: msg.clone() };
ChannelPhase::UnfundedInboundV1(channel) => {
convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
}
};
}
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
- let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+ let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let mut updates = $chan.monitor_updating_restored(&&logger,
&$self.node_signer, $self.chain_hash, &$self.default_configuration,
- $self.best_block.read().unwrap().height());
+ $self.best_block.read().unwrap().height);
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
// We only send a channel_update in the case where we are just now sending a
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
- let htlc_forwards = $self.handle_channel_resumption(
+ let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events, $chan, updates.raa,
- updates.commitment_update, updates.order, updates.accepted_htlcs,
+ updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
updates.funding_broadcastable, updates.channel_ready,
updates.announcement_sigs);
if let Some(upd) = channel_update {
if let Some(forwards) = htlc_forwards {
$self.forward_htlcs(&mut [forwards][..]);
}
+ if let Some(decode) = decode_update_add_htlcs {
+ $self.push_decode_update_add_htlcs(decode);
+ }
$self.finalize_claims(updates.finalized_claimed_htlcs);
for failure in updates.failed_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
macro_rules! handle_new_monitor_update {
($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
- let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+ let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
match $update_res {
ChannelMonitorUpdateStatus::UnrecoverableError => {
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
pending_inbound_payments: Mutex::new(new_hash_map()),
pending_outbound_payments: OutboundPayments::new(),
forward_htlcs: Mutex::new(new_hash_map()),
+ decode_update_add_htlcs: Mutex::new(new_hash_map()),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
pending_intercepted_htlcs: Mutex::new(new_hash_map()),
outpoint_to_peer: Mutex::new(new_hash_map()),
funding_batch_states: Mutex::new(BTreeMap::new()),
pending_offers_messages: Mutex::new(Vec::new()),
+ pending_broadcast_messages: Mutex::new(Vec::new()),
entropy_source,
node_signer,
}
fn create_and_insert_outbound_scid_alias(&self) -> u64 {
- let height = self.best_block.read().unwrap().height();
+ let height = self.best_block.read().unwrap().height;
let mut outbound_scid_alias = 0;
let mut i = 0;
loop {
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config,
- self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
+ self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id)
{
Ok(res) => res,
Err(e) => {
// the same channel.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
// the same channel.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
/// Gets the list of channels we have with a given counterparty, in random order.
pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
}
let logger = WithContext::from(
- &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+ &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
);
log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
} else {
ClosureReason::HolderForceClosed
};
- let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
+ let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
+ // Unfunded channel has no update
+ (None, chan_phase.context().get_counterparty_node_id())
+ },
}
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
log_error!(logger, "Force-closing channel {}", &channel_id);
}
};
if let Some(update) = update_opt {
- // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
- // not try to broadcast it via whatever peer we have.
- let per_peer_state = self.per_peer_state.read().unwrap();
- let a_peer_state_opt = per_peer_state.get(peer_node_id)
- .ok_or(per_peer_state.values().next());
- if let Ok(a_peer_state_mutex) = a_peer_state_opt {
- let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
- a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ // If we have a `ChannelUpdate` to broadcast, queue it now; it will go out with the
+ // next batch of pending broadcast messages.
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
}
Ok(counterparty_node_id)
}
- fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+ fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
+ -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ log_debug!(self.logger,
+ "Force-closing channel; error message sent to the peer: {}", error_message);
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
let per_peer_state = self.per_peer_state.read().unwrap();
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
- action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
},
}
);
}
}
- /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
- /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
- /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
- /// channel.
- pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+ /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+ /// rejecting new HTLCs on the given channel.
+ ///
+ /// The provided `error_message` is sent to the channel's counterparty, if connected, and
+ /// should be a human-readable description of what went wrong.
+ ///
+ /// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
+ /// isn't the counterparty of the corresponding channel.
+ pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
- self.force_close_sending_error(channel_id, counterparty_node_id, true)
+ self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
}
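+
+ // Example (sketch, not from this diff): a caller might invoke
+ // `chanman.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id,
+ // "Counterparty stopped responding".to_owned())`; the string is relayed to the peer in
+ // the resulting `error` message.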
/// Force closes a channel, rejecting new HTLCs on the given channel but skipping the broadcast of
- /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
- /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ /// the latest local transaction(s).
///
+ /// The provided `error_message` is sent to the channel's counterparty, if connected, and
+ /// should be a human-readable description of what went wrong.
+ ///
+ /// Fails if `channel_id` is unknown to the manager, or if the
+ /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
/// You can always broadcast the latest local transaction(s) via
/// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
- pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+ pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
- self.force_close_sending_error(channel_id, counterparty_node_id, false)
+ self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
}
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
- pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+ ///
+ /// The provided `error_message` is sent to the counterparty of each channel being closed
+ /// and should be a human-readable description of what went wrong.
+ pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
for chan in self.list_channels() {
- let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
}
}
/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
/// local transaction(s).
- pub fn force_close_all_channels_without_broadcasting_txn(&self) {
+ ///
+ /// The provided `error_message` is sent to the counterparty of each channel being closed
+ /// and should be a human-readable description of what went wrong.
+ pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
for chan in self.list_channels() {
- let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
+ }
+ }
+
+ fn can_forward_htlc_to_outgoing_channel(
+ &self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
+ ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+ if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+ // Note that the behavior here should be identical to the `unknown_next_peer` case in
+ // `can_forward_htlc` - we should NOT reveal the existence or non-existence of a
+ // private channel if we don't allow forwards outbound over them.
+ return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+ }
+ if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
+ // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+ // "refuse to forward unless the SCID alias was used", so we pretend
+ // we don't have the channel here.
+ return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+ }
+
+ // Note that we could technically not return an error yet here and just hope
+ // that the connection is reestablished or monitor updated by the time we get
+ // around to doing the actual forward, but better to fail early if we can and
+ // hopefully an attacker trying to path-trace payments cannot make this occur
+ // on a small/per-node/per-channel scale.
+ if !chan.context.is_live() { // channel_disabled
+ // If the channel_update we're going to return is disabled (i.e. the
+ // peer has been disabled for some time), return `channel_disabled`,
+ // otherwise return `temporary_channel_failure`.
+ let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+ if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+ return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+ } else {
+ return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+ }
+ }
+ if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+ let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+ return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+ }
+ if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
+ let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+ return Err((err, code, chan_update_opt));
+ }
+
+ Ok(())
+ }
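+
+ // For reference, the failure codes above follow BOLT 4's flag scheme: 0x4000 (PERM) | 10
+ // is `unknown_next_peer`, 0x1000 (UPDATE) | 7 is `temporary_channel_failure`,
+ // 0x1000 | 11 is `amount_below_minimum`, and 0x1000 | 20 is `channel_disabled`.
+ // UPDATE-flagged failures are expected to carry the latest `channel_update`.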
+
+ /// Executes a callback `C` that returns some value `X` on the funded channel found with the
+ /// given `scid`. `None` is returned when no such channel exists or it is not yet funded.
+ fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
+ &self, scid: u64, callback: C,
+ ) -> Option<X> {
+ let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
+ None => return None,
+ Some((cp_id, id)) => (cp_id, id),
+ };
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
+ return None;
+ }
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.get_mut(&channel_id).and_then(
+ |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+ ) {
+ None => None,
+ Some(chan) => Some(callback(chan)),
+ }
+ }
+
+ fn can_forward_htlc(
+ &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
+ ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+ match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+ self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
+ }) {
+ Some(Ok(())) => {},
+ Some(Err(e)) => return Err(e),
+ None => {
+ // If we couldn't find the channel info for the scid, it may be a phantom or
+ // intercept forward.
+ let is_intercept = self.default_configuration.accept_intercept_htlcs &&
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash);
+ let is_phantom = fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash);
+ if !is_intercept && !is_phantom {
+ return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ }
+ }
+ }
+
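+ // Note: the CLTV check below is performed against the next block height, as that is
+ // the earliest height at which the HTLC could be included in a block.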
+ let cur_height = self.best_block.read().unwrap().height + 1;
+ if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
+ cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
+ ) {
+ let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+ self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
+ }).flatten();
+ return Err((err_msg, err_code, chan_update_opt));
+ }
+
+ Ok(())
+ }
+
+ fn htlc_failure_from_update_add_err(
+ &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
+ mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
+ shared_secret: &[u8; 32]
+ ) -> HTLCFailureMsg {
+ let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+ if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
+ let chan_update = chan_update.unwrap();
+ if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
+ msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+ }
+ else if err_code == 0x1000 | 13 {
+ msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+ }
+ else if err_code == 0x1000 | 20 {
+ // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+ 0u16.write(&mut res).expect("Writes cannot fail");
+ }
+ (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+ msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+ chan_update.write(&mut res).expect("Writes cannot fail");
+ } else if err_code & 0x1000 == 0x1000 {
+ // If we're trying to return an error that requires a `channel_update` but
+ // we're forwarding to a phantom or intercept "channel" (i.e. cannot
+ // generate an update), just use the generic "temporary_node_failure"
+ // instead.
+ err_code = 0x2000 | 2;
+ }
+
+ log_info!(
+ WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
+ "Failed to accept/forward incoming HTLC: {}", err_msg
+ );
+ // If `msg.blinding_point` is set, we must always fail with malformed.
+ if msg.blinding_point.is_some() {
+ return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ });
}
+
+ let (err_code, err_data) = if is_intro_node_blinded_forward {
+ (INVALID_ONION_BLINDING, &[0; 32][..])
+ } else {
+ (err_code, &res.0[..])
+ };
+ HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: HTLCFailReason::reason(err_code, err_data.to_vec())
+ .get_encrypted_failure_packet(shared_secret, &None),
+ })
}
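+
+ // For UPDATE-flagged failures, the `data` assembled above follows BOLT 4: an optional
+ // per-code field (`htlc_msat`, `cltv_expiry`, or a placeholder `u16` of flags), then
+ // `len(channel_update) + 2`, the `channel_update` message type, and the serialized
+ // `channel_update` itself.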
fn decode_update_add_htlc_onion(
msg, &self.node_signer, &self.logger, &self.secp_ctx
)?;
- let is_intro_node_forward = match next_hop {
- onion_utils::Hop::Forward {
- next_hop_data: msgs::InboundOnionPayload::BlindedForward {
- intro_node_blinding_point: Some(_), ..
- }, ..
- } => true,
- _ => false,
- };
-
- macro_rules! return_err {
- ($msg: expr, $err_code: expr, $data: expr) => {
- {
- log_info!(
- WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
- "Failed to accept/forward incoming HTLC: {}", $msg
- );
- // If `msg.blinding_point` is set, we must always fail with malformed.
- if msg.blinding_point.is_some() {
- return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- sha256_of_onion: [0; 32],
- failure_code: INVALID_ONION_BLINDING,
- }));
- }
-
- let (err_code, err_data) = if is_intro_node_forward {
- (INVALID_ONION_BLINDING, &[0; 32][..])
- } else { ($err_code, $data) };
- return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason: HTLCFailReason::reason(err_code, err_data.to_vec())
- .get_encrypted_failure_packet(&shared_secret, &None),
- }));
- }
- }
- }
-
- let NextPacketDetails {
- next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value
- } = match next_packet_details_opt {
+ let next_packet_details = match next_packet_details_opt {
Some(next_packet_details) => next_packet_details,
// it is a receive, so no need for outbound checks
None => return Ok((next_hop, shared_secret, None)),
// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
- if let Some((err, mut code, chan_update)) = loop {
- let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
- let forwarding_chan_info_opt = match id_option {
- None => { // unknown_next_peer
- // Note that this is likely a timing oracle for detecting whether an scid is a
- // phantom or an intercept.
- if (self.default_configuration.accept_intercept_htlcs &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
- fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
- {
- None
- } else {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- },
- Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
- };
- let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
- |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
- ).flatten() {
- None => {
- // Channel was removed. The short_to_chan_info and channel_by_id maps
- // have no consistency guarantees.
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- },
- Some(chan) => chan
- };
- if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
- // Note that the behavior here should be identical to the above block - we
- // should NOT reveal the existence or non-existence of a private channel if
- // we don't allow forwards outbound over them.
- break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
- }
- if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
- // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
- // "refuse to forward unless the SCID alias was used", so we pretend
- // we don't have the channel here.
- break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
- }
- let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
-
- // Note that we could technically not return an error yet here and just hope
- // that the connection is reestablished or monitor updated by the time we get
- // around to doing the actual forward, but better to fail early if we can and
- // hopefully an attacker trying to path-trace payments cannot make this occur
- // on a small/per-node/per-channel scale.
- if !chan.context.is_live() { // channel_disabled
- // If the channel_update we're going to return is disabled (i.e. the
- // peer has been disabled for some time), return `channel_disabled`,
- // otherwise return `temporary_channel_failure`.
- if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
- break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
- } else {
- break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
- }
- }
- if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
- break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
- }
- if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
- break Some((err, code, chan_update_opt));
- }
- chan_update_opt
- } else {
- None
- };
-
- let cur_height = self.best_block.read().unwrap().height() + 1;
-
- if let Err((err_msg, code)) = check_incoming_htlc_cltv(
- cur_height, outgoing_cltv_value, msg.cltv_expiry
- ) {
- if code & 0x1000 != 0 && chan_update_opt.is_none() {
- // We really should set `incorrect_cltv_expiry` here but as we're not
- // forwarding over a real channel we can't generate a channel_update
- // for it. Instead we just return a generic temporary_node_failure.
- break Some((err_msg, 0x2000 | 2, None))
- }
- let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None };
- break Some((err_msg, code, chan_update_opt));
- }
+ self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
+ let (err_msg, err_code, chan_update_opt) = e;
+ self.htlc_failure_from_update_add_err(
+ msg, counterparty_node_id, err_msg, err_code, chan_update_opt,
+ next_hop.is_intro_node_blinded_forward(), &shared_secret
+ )
+ })?;
- break None;
- }
- {
- let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
- if let Some(chan_update) = chan_update {
- if code == 0x1000 | 11 || code == 0x1000 | 12 {
- msg.amount_msat.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 13 {
- msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
- 0u16.write(&mut res).expect("Writes cannot fail");
- }
- (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
- msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
- chan_update.write(&mut res).expect("Writes cannot fail");
- } else if code & 0x1000 == 0x1000 {
- // If we're trying to return an error that requires a `channel_update` but
- // we're forwarding to a phantom or intercept "channel" (i.e. cannot
- // generate an update), just use the generic "temporary_node_failure"
- // instead.
- code = 0x2000 | 2;
- }
- return_err!(err, code, &res.0[..]);
- }
- Ok((next_hop, shared_secret, Some(next_packet_pubkey)))
+ Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
}
fn construct_pending_htlc_status<'a>(
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
if msg.blinding_point.is_some() {
return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
match decoded_hop {
onion_utils::Hop::Receive(next_hop_data) => {
// OUR PAYMENT!
- let current_height: u32 = self.best_block.read().unwrap().height();
+ let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
current_height, self.default_configuration.accept_mpp_keysend)
if chan.context.get_short_channel_id().is_none() {
return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
}
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
self.get_channel_update_for_unicast(chan)
}
/// [`channel_update`]: msgs::ChannelUpdate
/// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
}
fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
self.send_payment_along_path(SendAlongPathArgs {
- path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
- session_priv_bytes
+ path, payment_hash, recipient_onion: &recipient_onion, total_value,
+ cur_height, payment_id, keysend_preimage, session_priv_bytes
})
}
&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
payment_hash, keysend_preimage, prng_seed
).map_err(|e| {
- let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
e
})?;
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
None => {
- let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
},
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
log_trace!(logger,
"Attempting to send payment with payment hash {} along path with next hop {}",
payment_hash, path.hops.first().unwrap().short_channel_id);
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
}
let funding_txo = chan.context.get_funding_txo().unwrap();
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
htlc_cltv, HTLCSource::OutboundRoute {
path: path.clone(),
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
/// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
#[cfg(test)]
pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
#[cfg(test)]
pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
}
}
pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
///
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
///
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
/// us to easily discern them from real payments.
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
&self.entropy_source, &self.node_signer, best_block_height,
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
+ fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, &'static str>>(
&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
mut find_funding_output: FundingOutput,
) -> Result<(), APIError> {
let funding_txo;
let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
- funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
- let logger = WithChannelContext::from(&self.logger, &chan.context);
- let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
- .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
- let channel_id = chan.context.channel_id();
+ macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
+ let counterparty;
+ let err = if let ChannelError::Close(msg) = $err {
+ let channel_id = $chan.context.channel_id();
+ counterparty = $chan.context.get_counterparty_node_id();
let reason = ClosureReason::ProcessingError { err: msg.clone() };
- let shutdown_res = chan.context.force_shutdown(false, reason);
- (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
- } else { unreachable!(); });
+ let shutdown_res = $chan.context.force_shutdown(false, reason);
+ MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
+ } else { unreachable!(); };
+
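+ // Both locks must be released before `handle_error!` runs, as it re-acquires
+ // `per_peer_state` to queue the error message for the peer.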
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
+ Err($api_err)
+ } } }
+ match find_funding_output(&chan, &funding_transaction) {
+ Ok(found_funding_txo) => funding_txo = found_funding_txo,
+ Err(err) => {
+ let chan_err = ChannelError::Close(err.to_owned());
+ let api_err = APIError::APIMisuseError { err: err.to_owned() };
+ return close_chan!(chan_err, api_err, chan);
+ },
+ }
+
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
match funding_res {
Ok(funding_msg) => (chan, funding_msg),
- Err((chan, err)) => {
- mem::drop(peer_state_lock);
- mem::drop(per_peer_state);
- let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
- return Err(APIError::ChannelUnavailable {
- err: "Signer refused to sign the initial commitment transaction".to_owned()
- });
- },
+ Err((mut chan, chan_err)) => {
+ let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
+ return close_chan!(chan_err, api_err, chan);
+ }
}
},
Some(phase) => {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut result = Ok(());
- if !funding_transaction.is_coin_base() {
+ if !funding_transaction.is_coinbase() {
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
result = result.and(Err(APIError::APIMisuseError {
}));
}
{
- let height = self.best_block.read().unwrap().height();
+ let height = self.best_block.read().unwrap().height;
// Transactions are evaluated as final by network mempools if their locktime is strictly
// lower than the next block height. However, the modules constituting our Lightning
// node might not have perfect sync about their blockchain views. Thus, if the wallet
is_batch_funding,
|chan, tx| {
let mut output_index = None;
- let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
+ let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
for (idx, outp) in tx.output.iter().enumerate() {
- if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
+ if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
if output_index.is_some() {
- return Err(APIError::APIMisuseError {
- err: "Multiple outputs matched the expected script and value".to_owned()
- });
+ return Err("Multiple outputs matched the expected script and value");
}
output_index = Some(idx as u16);
}
}
if output_index.is_none() {
- return Err(APIError::APIMisuseError {
- err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
- });
+ return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
}
let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
if let Some(funding_batch_state) = funding_batch_state.as_mut() {
for (channel_id, counterparty_node_id) in channels_to_remove {
per_peer_state.get(&counterparty_node_id)
.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
- .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
- .map(|mut chan| {
+ .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
+ .map(|(mut chan, mut peer_state)| {
update_maps_on_chan_removal!(self, &chan.context());
let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
+ peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: "Failed to fund channel".to_owned(),
+ }
+ },
+ });
});
}
}
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+
for channel_id in channel_ids {
if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
}
if let ChannelPhase::Funded(channel) = channel_phase {
if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: channel.context.get_counterparty_node_id(),
None => {
let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
next_hop_channel_id, next_node_id);
- let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+ let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
return Err(APIError::ChannelUnavailable {
err: error
Ok(())
}
+ fn process_pending_update_add_htlcs(&self) {
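+ // Take the entire pending map up front so the `decode_update_add_htlcs` lock is not
+ // held while decoding onions and taking per-peer locks below.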
+ let mut decode_update_add_htlcs = new_hash_map();
+ mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
+
+ let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
+ if let Some(outgoing_scid) = outgoing_scid_opt {
+ match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
+ Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
+ HTLCDestination::NextHopChannel {
+ node_id: Some(*outgoing_counterparty_node_id),
+ channel_id: *outgoing_channel_id,
+ },
+ None => HTLCDestination::UnknownNextHop {
+ requested_forward_scid: outgoing_scid,
+ },
+ }
+ } else {
+ HTLCDestination::FailedPayment { payment_hash }
+ }
+ };
+
+ 'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
+ let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+ let counterparty_node_id = chan.context.get_counterparty_node_id();
+ let channel_id = chan.context.channel_id();
+ let funding_txo = chan.context.get_funding_txo().unwrap();
+ let user_channel_id = chan.context.get_user_id();
+ let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
+ (counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
+ });
+ let (
+ incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
+ incoming_user_channel_id, incoming_accept_underpaying_htlcs
+ ) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
+ incoming_channel_details
+ } else {
+ // The incoming channel no longer exists; HTLCs should be resolved on-chain instead.
+ continue;
+ };
+
+ let mut htlc_forwards = Vec::new();
+ let mut htlc_fails = Vec::new();
+ for update_add_htlc in &update_add_htlcs {
+ let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
+ &update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
+ ) {
+ Ok(decoded_onion) => decoded_onion,
+ Err(htlc_fail) => {
+ htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
+ continue;
+ },
+ };
+
+ let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
+ let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
+
+ // Process the HTLC on the incoming channel.
+ match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
+ chan.can_accept_incoming_htlc(
+ update_add_htlc, &self.fee_estimator, &logger,
+ )
+ }) {
+ Some(Ok(_)) => {},
+ Some(Err((err, code))) => {
+ let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
+ self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
+ self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
+ }).flatten()
+ } else {
+ None
+ };
+ let htlc_fail = self.htlc_failure_from_update_add_err(
+ &update_add_htlc, &incoming_counterparty_node_id, err, code,
+ outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+ );
+ let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+ htlc_fails.push((htlc_fail, htlc_destination));
+ continue;
+ },
+ // The incoming channel no longer exists; HTLCs should be resolved on-chain instead.
+ None => continue 'outer_loop,
+ }
+
+ // Now process the HTLC on the outgoing channel if it's a forward.
+ if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
+ if let Err((err, code, chan_update_opt)) = self.can_forward_htlc(
+ &update_add_htlc, next_packet_details
+ ) {
+ let htlc_fail = self.htlc_failure_from_update_add_err(
+ &update_add_htlc, &incoming_counterparty_node_id, err, code,
+ chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+ );
+ let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+ htlc_fails.push((htlc_fail, htlc_destination));
+ continue;
+ }
+ }
+
+ match self.construct_pending_htlc_status(
+ &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
+ incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
+ ) {
+ PendingHTLCStatus::Forward(htlc_forward) => {
+ htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
+ },
+ PendingHTLCStatus::Fail(htlc_fail) => {
+ let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+ htlc_fails.push((htlc_fail, htlc_destination));
+ },
+ }
+ }
+
+ // Process all of the forwards and failures for the channel on which the HTLCs were
+ // received, handling them as a single batch.
+ let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id,
+ incoming_user_channel_id, htlc_forwards.drain(..).collect());
+ self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
+ for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
+ let failure = match htlc_fail {
+ HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
+ htlc_id: fail_htlc.htlc_id,
+ err_packet: fail_htlc.reason,
+ },
+ HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
+ htlc_id: fail_malformed_htlc.htlc_id,
+ sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
+ failure_code: fail_malformed_htlc.failure_code,
+ },
+ };
+ self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure);
+ self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
+ prev_channel_id: incoming_channel_id,
+ failed_next_destination: htlc_destination,
+ }, None));
+ }
+ }
+ }
+
/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
pub fn process_pending_htlc_forwards(&self) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.process_pending_update_add_htlcs();
+
let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
- let current_height: u32 = self.best_block.read().unwrap().height();
+ let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(hop_data,
incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, Some(phantom_shared_secret), false, None,
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
for forward_info in pending_forwards.drain(..) {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
}, skimmed_fee_msat, ..
},
}) => {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
}
}) => {
let blinded_failure = routing.blinded_failure();
- let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
+ let (cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret, mut onion_fields) = match routing {
PendingHTLCRouting::Receive {
- payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
- custom_tlvs, requires_blinded_error: _
+ payment_data, payment_metadata, payment_context,
+ incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
+ requires_blinded_error: _
} => {
let _legacy_hop_data = Some(payment_data.clone());
let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
payment_metadata, custom_tlvs };
(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
- Some(payment_data), phantom_shared_secret, onion_fields)
+ Some(payment_data), payment_context, phantom_shared_secret, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_data, payment_preimage, payment_metadata,
+ incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
+ } => {
let onion_fields = RecipientOnionFields {
payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
payment_metadata,
custom_tlvs,
};
(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
- payment_data, None, onion_fields)
+ payment_data, None, None, onion_fields)
},
_ => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
debug_assert!(!committed_to_claimable);
let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(
- &self.best_block.read().unwrap().height().to_be_bytes(),
+ &self.best_block.read().unwrap().height.to_be_bytes(),
);
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
macro_rules! check_total_value {
($purpose: expr) => {{
let mut payment_claimable_generated = false;
- let is_keysend = match $purpose {
- events::PaymentPurpose::SpontaneousPayment(_) => true,
- events::PaymentPurpose::InvoicePayment { .. } => false,
- };
+ let is_keysend = $purpose.is_keysend();
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
fail_htlc!(claimable_htlc, payment_hash);
}
};
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
- let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
+ let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
if (cltv_expiry as u64) < expected_min_expiry_height {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
&payment_hash, cltv_expiry, expected_min_expiry_height);
fail_htlc!(claimable_htlc, payment_hash);
}
}
- let purpose = events::PaymentPurpose::InvoicePayment {
- payment_preimage: payment_preimage.clone(),
- payment_secret: payment_data.payment_secret,
- };
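+ // Derive the payment purpose from the BOLT 12 payment context included in the
+ // onion, if any; otherwise this is a plain BOLT 11 invoice payment.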
+ let purpose = events::PaymentPurpose::from_parts(
+ payment_preimage,
+ payment_data.payment_secret,
+ payment_context,
+ );
check_total_value!(purpose);
},
OnionPayload::Spontaneous(preimage) => {
&payment_hash, payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
fail_htlc!(claimable_htlc, payment_hash);
} else {
- let purpose = events::PaymentPurpose::InvoicePayment {
- payment_preimage: inbound_payment.get().payment_preimage,
- payment_secret: payment_data.payment_secret,
- };
+ let purpose = events::PaymentPurpose::from_parts(
+ inbound_payment.get().payment_preimage,
+ payment_data.payment_secret,
+ payment_context,
+ );
let payment_claimable_generated = check_total_value!(purpose);
if payment_claimable_generated {
inbound_payment.remove_entry();
}
}
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
- if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- }
return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
| {
context.maybe_expire_prev_config();
if unfunded_context.should_expire_unfunded_channel() {
- let logger = WithChannelContext::from(&self.logger, context);
+ let logger = WithChannelContext::from(&self.logger, context, None);
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
if n >= DISABLE_GOSSIP_TICKS {
chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
if n >= ENABLE_GOSSIP_TICKS {
chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
chan.context.maybe_expire_prev_config();
if chan.should_disconnect_peer_awaiting_response() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
counterparty_node_id, chan_id);
pending_msg_events.push(MessageSendEvent::HandleError {
process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
pending_msg_events, counterparty_node_id)
},
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
}
});
for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
},
FailureCode::InvalidOnionPayload(data) => {
}
}
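+ // Fails an HTLC backwards, generating a `PendingHTLCsForwardable` event when needed.
+ // The `_without_forward_event` variant below lets callers batch many failures under
+ // a single event.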
+ fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+ let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
+ if push_forward_event { self.push_pending_forwards_ev(); }
+ }
+
/// Fails an HTLC backwards to the sender of it to us.
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
- fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+ fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
// Ensure that no peer state channel storage lock is held when calling this function.
// This ensures that future code doesn't introduce a lock-order requirement for
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
+ let mut push_forward_event;
match source {
HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
- if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+ push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
- &self.pending_events, &self.logger)
- { self.push_pending_forwards_ev(); }
+ &self.pending_events, &self.logger);
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
}) => {
log_trace!(
- WithContext::from(&self.logger, None, Some(*channel_id)),
+ WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
}
};
- let mut push_forward_ev = false;
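+ // Only request a `PendingHTLCsForwardable` event if no HTLCs are already queued
+ // for decoding or forwarding, as otherwise an event is already pending.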
+ push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
- if forward_htlcs.is_empty() {
- push_forward_ev = true;
- }
+ push_forward_event &= forward_htlcs.is_empty();
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(failure);
}
}
mem::drop(forward_htlcs);
- if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::HTLCHandlingFailed {
prev_channel_id: *channel_id,
}, None));
},
}
+ push_forward_event
}
/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
}
}
- let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
- let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
- let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
- ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
- payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value
- });
- if dup_purpose.is_some() {
- debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
- log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
- &payment_hash);
- }
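+ // Use the entry API so the `ClaimingPayment` is built at most once, while still
+ // flagging a duplicate pending claim as a bug.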
+ let claiming_payment = claimable_payments.pending_claiming_payments
+ .entry(payment_hash)
+ .and_modify(|_| {
+ debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
+ log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
+ &payment_hash);
+ })
+ .or_insert_with(|| {
+ let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
+ let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
+ ClaimingPayment {
+ amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
+ payment_purpose: payment.purpose,
+ receiver_node_id,
+ htlcs,
+ sender_intended_value,
+ onion_fields: payment.onion_fields,
+ }
+ });
- if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = payment.onion_fields {
+ if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = claiming_payment.onion_fields {
if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
+ let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
} else { errs.push((pk, err)); }
}
if !valid_mpp {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
let receiver = HTLCDestination::FailedPayment { payment_hash };
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let counterparty_node_id = chan.context.get_counterparty_node_id();
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
match fulfill_res {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
"Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
- next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_channel_id = hop_data.channel_id;
+ let prev_user_channel_id = hop_data.user_channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- #[cfg(debug_assertions)]
- let claiming_channel_id = hop_data.channel_id;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_channel_id,
+ *channel_id == prev_channel_id,
}
}), "{:?}", *background_events);
}
"skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- total_fee_earned_msat,
- claim_from_onchain_tx: from_onchain,
prev_channel_id: Some(prev_channel_id),
next_channel_id: Some(next_channel_id),
- outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ prev_user_channel_id,
+ next_user_channel_id,
+ total_fee_earned_msat,
skimmed_fee_msat,
+ claim_from_onchain_tx: from_onchain,
+ outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
})
receiver_node_id,
htlcs,
sender_intended_value: sender_intended_total_msat,
+ onion_fields,
}) = payment {
self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
payment_hash,
receiver_node_id: Some(receiver_node_id),
htlcs,
sender_intended_total_msat,
+ onion_fields,
}, None));
}
},
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
- pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+ pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
+ funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
- -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
- let logger = WithChannelContext::from(&self.logger, &channel.context);
- log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+ -> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+ log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
if raa.is_some() { "an" } else { "no" },
- if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+ if commitment_update.is_some() { "a" } else { "no" },
+ pending_forwards.len(), pending_update_adds.len(),
if funding_broadcastable.is_some() { "" } else { "not " },
if channel_ready.is_some() { "sending" } else { "without" },
if announcement_sigs.is_some() { "sending" } else { "without" });
- let mut htlc_forwards = None;
-
let counterparty_node_id = channel.context.get_counterparty_node_id();
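+ // Before the funding transaction confirms, the channel has no real short channel
+ // id, so fall back to the outbound SCID alias.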
+ let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
+
+ let mut htlc_forwards = None;
if !pending_forwards.is_empty() {
- htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
- channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
+ htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(),
+ channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
+ }
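+ // Hand any update_add_htlcs the channel replayed on resumption back to the caller,
+ // to be queued for decoding after the per-peer lock is released.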
+ let mut decode_update_add_htlcs = None;
+ if !pending_update_adds.is_empty() {
+ decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
}
if let Some(msg) = channel_ready {
emit_channel_ready_event!(pending_events, channel);
}
- htlc_forwards
+ (htlc_forwards, decode_update_add_htlcs)
}
fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
pending.retain(|upd| upd.update_id > highest_applied_update_id);
pending.len()
} else { 0 };
- let logger = WithChannelContext::from(&self.logger, &channel.context);
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peers_without_funded_channels =
// happening and return an error. N.B. that we create the channel with an outbound SCID of zero so
// that we can delay allocating the SCID until after we're sure that the checks below will
// succeed.
- let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
+ let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
Some(unaccepted_channel) => {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
- &self.logger, accept_0conf).map_err(|e| {
- let err_str = e.to_string();
- log_error!(logger, "{}", err_str);
-
- APIError::ChannelUnavailable { err: err_str }
- })
- }
+ &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
+ },
_ => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
- Err(APIError::APIMisuseError { err: err_str })
+ return Err(APIError::APIMisuseError { err: err_str });
}
- }?;
+ };
- if accept_0conf {
- // This should have been correctly configured by the call to InboundV1Channel::new.
- debug_assert!(channel.context.minimum_depth().unwrap() == 0);
- } else if channel.context.get_channel_type().requires_zero_conf() {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
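+ // Route channel-creation failures through `handle_error` so the peer is sent an
+ // error message before we surface an `APIError` to the caller.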
+ match res {
+ Err(err) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
+ Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+ Err(e) => {
+ return Err(APIError::ChannelUnavailable { err: e.err });
+ },
}
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
- log_error!(logger, "{}", err_str);
+ }
+ Ok(mut channel) => {
+ if accept_0conf {
+ // This should have been correctly configured by the call to InboundV1Channel::new.
+ debug_assert!(channel.context.minimum_depth().unwrap() == 0);
+ } else if channel.context.get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- } else {
- // If this peer already has some channels, a new channel won't increase our number of peers
- // with unfunded channels, so as long as we aren't over the maximum number of unfunded
- // channels per-peer we can accept channels from a peer with existing ones.
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
- }
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
- log_error!(logger, "{}", err_str);
+ return Err(APIError::APIMisuseError { err: err_str });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- }
- }
+ return Err(APIError::APIMisuseError { err: err_str });
+ }
+ }
- // Now that we know we have a channel, assign an outbound SCID alias.
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- channel.context.set_outbound_scid_alias(outbound_scid_alias);
+ // Now that we know we have a channel, assign an outbound SCID alias.
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ channel.context.set_outbound_scid_alias(outbound_scid_alias);
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.context.get_counterparty_node_id(),
- msg: channel.accept_inbound_channel(),
- });
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg: channel.accept_inbound_channel(),
+ });
- peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+ peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
- Ok(())
+ Ok(())
+ },
+ }
}
/// Gets the number of peers which match the given filter and do not have any funded, outbound,
fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
where Filter: Fn(&PeerState<SP>) -> bool {
let mut peers_without_funded_channels = 0;
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
{
let peer_state_lock = self.per_peer_state.read().unwrap();
for (_, peer_mtx) in peer_state_lock.iter() {
num_unfunded_channels += 1;
}
},
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ // Only inbound V2 channels that are not 0conf and that we do not contribute to will be
+ // included in the unfunded count.
+ if chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.dual_funding_context.our_funding_satoshis == 0 {
+ num_unfunded_channels += 1;
+ }
+ },
ChannelPhase::UnfundedOutboundV1(_) => {
// Outbound channels don't contribute to the unfunded count in the DoS context.
continue;
+ },
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) => {
+ // Outbound channels don't contribute to the unfunded count in the DoS context.
+ continue;
}
}
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
// likely to be lost on restart!
- if msg.chain_hash != self.chain_hash {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
+ if msg.common_fields.chain_hash != self.chain_hash {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
if !self.default_configuration.accept_inbound_channels {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
// Get the number of peers with channels, but without funded ones. We don't care too much
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+ MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+ msg.common_fields.temporary_channel_id.clone())
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
{
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
- msg.temporary_channel_id.clone()));
+ msg.common_fields.temporary_channel_id.clone()));
}
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
- msg.temporary_channel_id.clone()));
+ msg.common_fields.temporary_channel_id.clone()));
}
- let channel_id = msg.temporary_channel_id;
+ let channel_id = msg.common_fields.temporary_channel_id;
let channel_exists = peer_state.has_channel(&channel_id);
if channel_exists {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "temporary_channel_id collision for the same peer!".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
if self.default_configuration.manually_accept_inbound_channels {
let channel_type = channel::channel_type_from_open_channel(
- &msg, &peer_state.latest_features, &self.channel_type_features()
+ &msg.common_fields, &peer_state.latest_features, &self.channel_type_features()
).map_err(|e|
- MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+ MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
)?;
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
+ temporary_channel_id: msg.common_fields.temporary_channel_id.clone(),
counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
+ funding_satoshis: msg.common_fields.funding_satoshis,
push_msat: msg.push_msat,
channel_type,
}, None));
&self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false)
{
Err(e) => {
- return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
+ return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id));
},
Ok(res) => res
};
let channel_type = channel.context.get_channel_type();
if channel_type.requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No zero confirmation channels accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
if channel_type.requires_anchors_zero_fee_htlc_tx() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No channels with anchor outputs accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
hash_map::Entry::Occupied(mut phase) => {
match phase.get_mut() {
ChannelPhase::UnfundedOutboundV1(chan) => {
try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
- (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+ (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
},
_ => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
}
}
},
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::FundingGenerationReady {
- temporary_channel_id: msg.temporary_channel_id,
+ temporary_channel_id: msg.common_fields.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
let (mut chan, funding_msg_opt, monitor) =
match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
- let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
+ let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
Ok(res) => res,
Err((inbound_chan, err)) => {
}
Ok(())
} else {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
fail_chan!("Duplicate funding outpoint");
}
let logger = WithContext::from(
&self.logger,
Some(chan.context.get_counterparty_node_id()),
- Some(chan.context.channel_id())
+ Some(chan.context.channel_id()),
+ None
);
let res =
chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
match phase {
ChannelPhase::Funded(chan) => {
if !chan.received_shutdown() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
msg.channel_id,
if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
},
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
let context = phase.context_mut();
- let logger = WithChannelContext::from(&self.logger, context);
+ let logger = WithChannelContext::from(&self.logger, context, None);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
},
+ // TODO(dual_funding): Combine this match arm with above.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
+ let context = phase.context_mut();
+ let logger = WithChannelContext::from(&self.logger, context, None);
+ log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+ let mut chan = remove_channel_phase!(self, chan_phase_entry);
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
+ },
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
};
if let Some(broadcast_tx) = tx {
let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
- log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx));
+ log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(ChannelPhase::Funded(chan)) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let pending_forward_info = match decoded_hop_res {
+ let mut pending_forward_info = match decoded_hop_res {
Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
self.construct_pending_htlc_status(
msg, counterparty_node_id, shared_secret, next_hop,
),
Err(e) => PendingHTLCStatus::Fail(e)
};
- let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
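+ // HTLCs received over a blinded path must be failed with `invalid_onion_blinding`
+ // in an `update_fail_malformed_htlc` so we don't reveal our position in the path.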
if msg.blinding_point.is_some() {
- return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
- msgs::UpdateFailMalformedHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- sha256_of_onion: [0; 32],
- failure_code: INVALID_ONION_BLINDING,
- }
- ))
- }
- // If the update_add is completely bogus, the call will Err and we will close,
- // but if we've sent a shutdown and they haven't acknowledged it yet, we just
- // want to reject the new HTLC and fail it backwards instead of forwarding.
- match pending_forward_info {
- PendingHTLCStatus::Forward(PendingHTLCInfo {
- ref incoming_shared_secret, ref routing, ..
- }) => {
- let reason = if routing.blinded_failure().is_some() {
- HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
- } else if (error_code & 0x1000) != 0 {
- let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
- HTLCFailReason::reason(real_code, error_data)
- } else {
- HTLCFailReason::from_failure_code(error_code)
- }.get_encrypted_failure_packet(incoming_shared_secret, &None);
- let msg = msgs::UpdateFailHTLC {
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason
- };
- PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
- },
- _ => pending_forward_info
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ } else {
+ match pending_forward_info {
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ ref incoming_shared_secret, ref routing, ..
+ }) => {
+ let reason = if routing.blinded_failure().is_some() {
+ HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+ } else if (error_code & 0x1000) != 0 {
+ let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+ HTLCFailReason::reason(real_code, error_data)
+ } else {
+ HTLCFailReason::from_failure_code(error_code)
+ }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+ let msg = msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason
+ };
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+ },
+ _ => {},
+ }
}
- };
- let logger = WithChannelContext::from(&self.logger, &chan.context);
- try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+ }
+ try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
+ let next_user_channel_id;
let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
msg.channel_id);
// outbound HTLC is claimed. This is guaranteed to all complete before we
// process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+ next_user_channel_id = chan.context.get_user_id();
res
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
};
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
- funding_txo, msg.channel_id
+ funding_txo, msg.channel_id, Some(next_user_channel_id),
);
Ok(())
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo = chan.context.get_funding_txo();
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
}
}
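+ /// Queues `update_add_htlc`s received on the channel with the given SCID for decoding,
+ /// generating a `PendingHTLCsForwardable` event if no HTLCs were already pending.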
+ fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
+ let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
+ let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+ push_forward_event &= decode_update_add_htlcs.is_empty();
+ let scid = update_add_htlcs.0;
+ match decode_update_add_htlcs.entry(scid) {
+ hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
+ hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
+ }
+ if push_forward_event { self.push_pending_forwards_ev(); }
+ }
+
#[inline]
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
+ if push_forward_event { self.push_pending_forwards_ev() }
+ }
+
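+ /// Queues the given HTLC forwards, returning whether a `PendingHTLCsForwardable` event
+ /// should be generated, so callers can batch several calls under a single event.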
+ #[inline]
+ fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
+ let mut push_forward_event = false;
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
- let mut push_forward_event = false;
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
// Pull this now to avoid introducing a lock order with `forward_htlcs`.
let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
+ let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
let forward_htlcs_empty = forward_htlcs.is_empty();
match forward_htlcs.entry(scid) {
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
+ let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
} else {
// We don't want to generate a PendingHTLCsForwardable event if only intercepted
// payments are being processed.
- if forward_htlcs_empty {
- push_forward_event = true;
- }
+ push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
- self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
+ push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
}
if !new_intercept_events.is_empty() {
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
- if push_forward_event { self.push_pending_forwards_ev() }
}
+ push_forward_event
}
fn push_pending_forwards_ev(&self) {
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_phase_entry!(self, chan.announcement_signatures(
- &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
+ &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
msg, &self.default_configuration
), chan_phase_entry),
// Note that announcement_signatures fails if the channel cannot be announced,
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
// If nothing changed after applying their update, we don't need to bother
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
- let htlc_forwards;
let need_lnd_workaround = {
let per_peer_state = self.per_peer_state.read().unwrap();
msg.channel_id
)
})?;
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
}
}
let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
- htlc_forwards = self.handle_channel_resumption(
+ let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
- Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ debug_assert!(htlc_forwards.is_none());
+ debug_assert!(decode_update_add_htlcs.is_none());
if let Some(upd) = channel_update {
peer_state.pending_msg_events.push(upd);
}
}
};
- let mut persist = NotifyOption::SkipPersistHandleEvents;
- if let Some(forwards) = htlc_forwards {
- self.forward_htlcs(&mut [forwards][..]);
- persist = NotifyOption::DoPersist;
- }
-
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
- Ok(persist)
+ Ok(NotifyOption::SkipPersistHandleEvents)
}
/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage,
htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
- false, counterparty_node_id, funding_outpoint, channel_id);
+ false, counterparty_node_id, funding_outpoint, channel_id, None);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+ reason
+ } else {
+ ClosureReason::HolderForceClosed
+ };
+ failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
},
});
}
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
- chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context));
+ chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
if !holding_cell_failed_htlcs.is_empty() {
failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
}
peer_state.channel_by_id.retain(|channel_id, phase| {
match phase {
ChannelPhase::Funded(chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
// We're done with this channel. We got a closing_signed and sent back
// a closing_signed with a closing transaction to broadcast.
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
self.finish_close_channel(failure);
}
}
+}
+macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
- /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
- /// not have an expiration unless otherwise set on the builder.
+ /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
+ /// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
///
/// # Privacy
///
- /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with
- /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
- /// the node must be announced, otherwise, there is no way to find a path to the introduction in
- /// order to send the [`InvoiceRequest`].
+ /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the offer based on the given
+ /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+ /// privacy implications as well as those of the parameterized [`Router`], which implements
+ /// [`MessageRouter`].
///
/// Also, uses a derived signing pubkey in the offer for recipient privacy.
///
///
/// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
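+ ///
+ /// For illustration only, a minimal sketch assuming a configured `channel_manager` and
+ /// ignoring error handling (values hypothetical):
+ ///
+ /// ```ignore
+ /// // Build an offer for 10k sats that never expires and encode it for display.
+ /// let offer = channel_manager
+ ///     .create_offer_builder(None)?
+ ///     .amount_msats(10_000_000)
+ ///     .build()?;
+ /// let encoded = offer.to_string(); // `lno1...`
+ /// ```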
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
pub fn create_offer_builder(
- &self, description: String
- ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
- let node_id = self.get_our_node_id();
- let expanded_key = &self.inbound_payment_key;
- let entropy = &*self.entropy_source;
- let secp_ctx = &self.secp_ctx;
-
- let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ &$self, absolute_expiry: Option<Duration>
+ ) -> Result<$builder, Bolt12SemanticError> {
+ let node_id = $self.get_our_node_id();
+ let expanded_key = &$self.inbound_payment_key;
+ let entropy = &*$self.entropy_source;
+ let secp_ctx = &$self.secp_ctx;
+
+ let path = $self.create_blinded_path_using_absolute_expiry(absolute_expiry)
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = OfferBuilder::deriving_signing_pubkey(
- description, node_id, expanded_key, entropy, secp_ctx
+ node_id, expanded_key, entropy, secp_ctx
)
- .chain_hash(self.chain_hash)
+ .chain_hash($self.chain_hash)
.path(path);
- Ok(builder)
+ let builder = match absolute_expiry {
+ None => builder,
+ Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
+ };
+
+ Ok(builder.into())
}
+} }
+macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
/// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
///
///
/// # Privacy
///
- /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with
- /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
- /// the node must be announced, otherwise, there is no way to find a path to the introduction in
- /// order to send the [`Bolt12Invoice`].
+ /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the refund based on the given
+ /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+ /// privacy implications as well as those of the parameterized [`Router`], which implements
+ /// [`MessageRouter`].
///
/// Also, uses a derived payer id in the refund for payer privacy.
///
/// - `amount_msats` is invalid, or
/// - the parameterized [`Router`] is unable to create a blinded path for the refund.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`Refund`]: crate::offers::refund::Refund
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
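+ ///
+ /// A sketch, assuming a configured `channel_manager` and caller-chosen `absolute_expiry`,
+ /// `payment_id`, and `retry_strategy` values:
+ ///
+ /// ```ignore
+ /// let refund = channel_manager
+ ///     .create_refund_builder(10_000_000, absolute_expiry, payment_id, retry_strategy, None)?
+ ///     .build()?;
+ /// ```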
pub fn create_refund_builder(
- &self, description: String, amount_msats: u64, absolute_expiry: Duration,
- payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
- ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
- let node_id = self.get_our_node_id();
- let expanded_key = &self.inbound_payment_key;
- let entropy = &*self.entropy_source;
- let secp_ctx = &self.secp_ctx;
-
- let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ &$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
+ retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
+ ) -> Result<$builder, Bolt12SemanticError> {
+ let node_id = $self.get_our_node_id();
+ let expanded_key = &$self.inbound_payment_key;
+ let entropy = &*$self.entropy_source;
+ let secp_ctx = &$self.secp_ctx;
+
+ let path = $self.create_blinded_path_using_absolute_expiry(Some(absolute_expiry))
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
- description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
+ node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
- .chain_hash(self.chain_hash)
+ .chain_hash($self.chain_hash)
.absolute_expiry(absolute_expiry)
.path(path);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
+
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
- self.pending_outbound_payments
+ $self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
- Ok(builder)
+ Ok(builder.into())
}
+} }
+
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
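+ // The offer/refund builder methods are macro-generated so the same body can be
+ // instantiated with different concrete builder types for the `c_bindings` and
+ // non-`c_bindings` builds.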
+ #[cfg(not(c_bindings))]
+ create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
+ #[cfg(not(c_bindings))]
+ create_refund_builder!(self, RefundBuilder<secp256k1::All>);
+
+ #[cfg(c_bindings)]
+ create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
+ #[cfg(c_bindings)]
+ create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
/// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
/// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual [`Bolt12Invoice`] once it is received.
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`]
- /// as the introduction node and a derived payer id for payer privacy. As such, currently, the
- /// node must be announced. Otherwise, there is no way to find a path to the introduction node
- /// in order to send the [`Bolt12Invoice`].
+ /// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
+ /// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the
+ /// docs of the parameterized [`Router`], which implements [`MessageRouter`].
///
/// # Limitations
///
/// Errors if:
/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
/// - the provided parameters are invalid for the offer,
+ /// - the offer is for an unsupported chain, or
/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
/// request.
///
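+ ///
+ /// A sketch of a call, assuming this method's parameters are (`offer`, `quantity`,
+ /// `amount_msats`, `payer_note`, `payment_id`, `retry_strategy`,
+ /// `max_total_routing_fee_msat`) with values supplied by the caller:
+ ///
+ /// ```ignore
+ /// channel_manager.pay_for_offer(&offer, None, None, None, payment_id, retry_strategy, None)?;
+ /// ```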
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let builder = offer
+ let builder: InvoiceRequestBuilder<DerivedPayerId, secp256k1::All> = offer
.request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
- .chain_hash(self.chain_hash)?;
+ .into();
+ let builder = builder.chain_hash(self.chain_hash)?;
+
let builder = match quantity {
None => builder,
Some(quantity) => builder.quantity(quantity)?,
let invoice_request = builder.build_and_sign()?;
let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments
.add_new_awaiting_invoice(
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
- if offer.paths().is_empty() {
- let message = new_pending_onion_message(
- OffersMessage::InvoiceRequest(invoice_request),
- Destination::Node(offer.signing_pubkey()),
- Some(reply_path),
- );
- pending_offers_messages.push(message);
- } else {
+ if !offer.paths().is_empty() {
// Send as many invoice requests as there are paths in the offer (with an upper bound).
// Using only one path could result in a failure if the path no longer exists. But only
// one invoice for a given payment id will be paid, even if more than one is received.
);
pending_offers_messages.push(message);
}
+ } else if let Some(signing_pubkey) = offer.signing_pubkey() {
+ let message = new_pending_onion_message(
+ OffersMessage::InvoiceRequest(invoice_request),
+ Destination::Node(signing_pubkey),
+ Some(reply_path),
+ );
+ pending_offers_messages.push(message);
+ } else {
+ debug_assert!(false);
+ return Err(Bolt12SemanticError::MissingSigningPubkey);
}
Ok(())
///
/// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
/// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding
- /// [`PaymentPreimage`].
+ /// [`PaymentPreimage`]. It is returned purely for informational purposes.
///
/// # Limitations
///
///
/// # Errors
///
- /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
- /// path for the invoice.
+ /// Errors if:
+ /// - the refund is for an unsupported chain, or
+ /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
+ /// the invoice.
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
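+ ///
+ /// A sketch, assuming a configured `channel_manager` and a received `refund`:
+ ///
+ /// ```ignore
+ /// let invoice = channel_manager.request_refund_payment(&refund)?;
+ /// println!("Requested payment to hash {}", invoice.payment_hash());
+ /// ```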
- pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
+ pub fn request_refund_payment(
+ &self, refund: &Refund
+ ) -> Result<Bolt12Invoice, Bolt12SemanticError> {
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
let amount_msats = refund.amount_msats();
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+ if refund.chain() != self.chain_hash {
+ return Err(Bolt12SemanticError::UnsupportedChain);
+ }
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
- let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+ let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+ let payment_paths = self.create_blinded_payment_paths(
+ amount_msats, payment_secret, payment_context
+ )
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
#[cfg(feature = "std")]
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
+ let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
let reply_path = self.create_blinded_path()
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if refund.paths().is_empty() {
let message = new_pending_onion_message(
- OffersMessage::Invoice(invoice),
+ OffersMessage::Invoice(invoice.clone()),
Destination::Node(refund.payer_id()),
Some(reply_path),
);
}
}
- Ok(())
+ Ok(invoice)
},
Err(()) => Err(Bolt12SemanticError::InvalidAmount),
}
/// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
/// [`PaymentHash`] and [`PaymentPreimage`] for you.
///
- /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
- /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
- /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
- /// passed directly to [`claim_funds`].
+ /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event,
+ /// whose [`PaymentClaimable::purpose`] will return the preimage via [`PaymentPurpose::preimage`].
+ /// That preimage should then be passed directly to [`claim_funds`].
///
/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
///
/// [`claim_funds`]: Self::claim_funds
/// [`PaymentClaimable`]: events::Event::PaymentClaimable
/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
- /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
- /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
+ /// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
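+ ///
+ /// A sketch, assuming a configured `channel_manager`: accept any amount and expire the
+ /// inbound payment metadata after one hour:
+ ///
+ /// ```ignore
+ /// let (payment_hash, payment_secret) = channel_manager
+ ///     .create_inbound_payment(None, 3600, None)
+ ///     .expect("parameters are valid");
+ /// ```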
pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
+ /// Creates a blinded path by delegating to [`MessageRouter`] based on the path's intended
+ /// lifetime.
+ ///
+ /// The path is compact if it is short-lived and a full-length path otherwise, as determined
+ /// by the given `absolute_expiry` (seconds since the Unix epoch). See
+ /// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`].
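+ ///
+ /// For illustration (values hypothetical): if `MAX_SHORT_LIVED_RELATIVE_EXPIRY` were one
+ /// day and `now` were 1,000,000 seconds since the epoch:
+ ///
+ /// ```ignore
+ /// let an_hour_out = Some(Duration::from_secs(1_000_000 + 3_600)); // compact path
+ /// let a_week_out = Some(Duration::from_secs(1_000_000 + 7 * 86_400)); // full path
+ /// let never: Option<Duration> = None; // treated as `Duration::MAX`, so a full path
+ /// ```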
+ fn create_blinded_path_using_absolute_expiry(
+ &self, absolute_expiry: Option<Duration>
+ ) -> Result<BlindedPath, ()> {
+ let now = self.duration_since_epoch();
+ let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
+
+ if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
+ self.create_compact_blinded_path()
+ } else {
+ self.create_blinded_path()
+ }
+ }
+
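+ /// Returns the current time as a [`Duration`] since the Unix epoch. Without `std`, this
+ /// falls back to the highest block timestamp we have seen.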
+ pub(super) fn duration_since_epoch(&self) -> Duration {
+ #[cfg(not(feature = "std"))]
+ let now = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+ #[cfg(feature = "std")]
+ let now = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+
+ now
+ }
+
/// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
///
/// Errors if the `MessageRouter` errors or returns an empty `Vec`.
let peers = self.per_peer_state.read().unwrap()
.iter()
- .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages())
+ .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+ .filter(|(_, peer)| peer.is_connected)
+ .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
.map(|(node_id, _)| *node_id)
.collect::<Vec<_>>();
.and_then(|paths| paths.into_iter().next().ok_or(()))
}
+ /// Creates a blinded path by delegating to [`MessageRouter::create_compact_blinded_paths`].
+ ///
+ /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+ fn create_compact_blinded_path(&self) -> Result<BlindedPath, ()> {
+ let recipient = self.get_our_node_id();
+ let secp_ctx = &self.secp_ctx;
+
+ let peers = self.per_peer_state.read().unwrap()
+ .iter()
+ .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+ .filter(|(_, peer)| peer.is_connected)
+ .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
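+ // Advertise, per peer, the short channel id of the oldest usable channel we share
+ // (lowest creation height), as that alias is presumably the least likely to become
+ // stale while the compact path is in use.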
+ .map(|(node_id, peer)| ForwardNode {
+ node_id: *node_id,
+ short_channel_id: peer.channel_by_id
+ .iter()
+ .filter(|(_, channel)| channel.context().is_usable())
+ .min_by_key(|(_, channel)| channel.context().channel_creation_height)
+ .and_then(|(_, channel)| channel.context().get_short_channel_id()),
+ })
+ .collect::<Vec<_>>();
+
+ self.router
+ .create_compact_blinded_paths(recipient, peers, secp_ctx)
+ .and_then(|paths| paths.into_iter().next().ok_or(()))
+ }
+
/// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
/// [`Router::create_blinded_payment_paths`].
fn create_blinded_payment_paths(
- &self, amount_msats: u64, payment_secret: PaymentSecret
+ &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
let secp_ctx = &self.secp_ctx;
let first_hops = self.list_usable_channels();
let payee_node_id = self.get_our_node_id();
- let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+ let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
+ LATENCY_GRACE_PERIOD_BLOCKS;
let payee_tlvs = ReceiveTlvs {
payment_secret,
max_cltv_expiry,
htlc_minimum_msat: 1,
},
+ payment_context,
};
self.router.create_blinded_payment_paths(
payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
///
/// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
/// Note that this method is not guaranteed to return unique values, you may need to call it a few
/// times to get a unique scid.
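+ ///
+ /// A sketch of caller-side de-duplication (`used_scids` is a hypothetical set kept by
+ /// the caller):
+ ///
+ /// ```ignore
+ /// let mut scid = channel_manager.get_intercept_scid();
+ /// while used_scids.contains(&scid) {
+ ///     scid = channel_manager.get_intercept_scid();
+ /// }
+ /// used_scids.insert(scid);
+ /// ```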
pub fn get_intercept_scid(&self) -> u64 {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
let logger = WithContext::from(
- &self.logger, Some(counterparty_node_id), Some(channel_id),
+ &self.logger, Some(counterparty_node_id), Some(channel_id), None
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
/// will randomly be placed first or last in the returned array.
///
/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
- /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
+ /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
/// the `MessageSendEvent`s to the specific peer they were generated under.
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
result = NotifyOption::DoPersist;
}
+ let mut is_any_peer_connected = false;
let mut pending_events = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
if peer_state.pending_msg_events.len() > 0 {
pending_events.append(&mut peer_state.pending_msg_events);
}
+ if peer_state.is_connected {
+ is_any_peer_connected = true;
+ }
+ }
+
+ // Only fetch the queued broadcast messages if at least one peer is connected;
+ // otherwise, leave them queued until a peer connects.
+ if is_any_peer_connected {
+ let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
+ pending_events.append(&mut broadcast_msgs);
}
if !pending_events.is_empty() {
fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
{
let best_block = self.best_block.read().unwrap();
- assert_eq!(best_block.block_hash(), header.prev_blockhash,
+ assert_eq!(best_block.block_hash, header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
- assert_eq!(best_block.height(), height - 1,
+ assert_eq!(best_block.height, height - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
}
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
- assert_eq!(best_block.block_hash(), header.block_hash(),
+ assert_eq!(best_block.block_hash, header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
- assert_eq!(best_block.height(), height,
+ assert_eq!(best_block.height, height,
"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
}
}
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
- self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
.map(|(a, b)| (a, Vec::new(), b)));
- let last_best_block_height = self.best_block.read().unwrap().height();
+ let last_best_block_height = self.best_block.read().unwrap().height;
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
- self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+ self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
}
}
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
macro_rules! max_time {
($timestamp: expr) => {
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
- channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None))
+ channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
} else { Ok((None, Vec::new(), None)) }
} else { Ok((None, Vec::new(), None)) }
});
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
+
peer_state.channel_by_id.retain(|_, phase| {
match phase {
// Retain unfunded channels.
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
+ // TODO(dual_funding): Combine this match arm with above.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
ChannelPhase::Funded(channel) => {
let res = f(channel);
if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
}
- let logger = WithChannelContext::from(&self.logger, &channel.context);
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(self, pending_msg_events, channel, channel_ready);
if channel.context.is_usable() {
let reason_message = format!("{}", reason);
failed_channels.push(channel.context.force_shutdown(true, reason));
if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
let logger = WithContext::from(
- &self.logger, None, Some(htlc.prev_channel_id)
+ &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
}
/// Returns true if this [`ChannelManager`] needs to be persisted.
+ ///
+ /// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
+ /// indicates this should be checked.
pub fn get_and_clear_needs_persistence(&self) -> bool {
self.needs_persist_flag.swap(false, Ordering::AcqRel)
}
fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
- msg.temporary_channel_id.clone())), *counterparty_node_id);
+ msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
}
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
- msg.temporary_channel_id.clone())), *counterparty_node_id);
+ msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
}
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
msg.channel_id.clone())), *counterparty_node_id);
}
+ #[cfg(splicing)]
fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported".to_owned(),
msg.channel_id.clone())), *counterparty_node_id);
}
+ #[cfg(splicing)]
fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported (splice_ack)".to_owned(),
msg.channel_id.clone())), *counterparty_node_id);
}
+ #[cfg(splicing)]
fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported (splice_locked)".to_owned(),
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
log_debug!(
- WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+ WithContext::from(&self.logger, Some(*counterparty_node_id), None, None),
"Marking channels with {} disconnected and generating channel_updates.",
log_pubkey!(counterparty_node_id)
);
peer_state.channel_by_id.retain(|_, phase| {
let context = match phase {
ChannelPhase::Funded(chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
// We only retain funded channels that are not shutdown.
return true;
}
&mut chan.context
},
- // We retain UnfundedOutboundV1 channel for some time in case
- // peer unexpectedly disconnects, and intends to reconnect again.
- ChannelPhase::UnfundedOutboundV1(_) => {
- return true;
- },
+ // If we get disconnected and haven't yet committed to a funding
+ // transaction, we can replay the `open_channel` on reconnection, so don't
+ // bother dropping the channel here. However, if we already committed to
+ // the funding transaction we don't yet support replaying the funding
+ // handshake (and bailing if the peer rejects it), so we force-close in
+ // that case.
+ ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
+ ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
// Unfunded inbound channels will always be removed.
ChannelPhase::UnfundedInboundV1(chan) => {
&mut chan.context
},
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ &mut chan.context
+ },
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ &mut chan.context
+ },
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
// Gossip
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
- &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ // [`ChannelManager::pending_broadcast_messages`] holds the [`BroadcastChannelUpdate`]
+ // events, so none should appear here; this arm exists only for match exhaustiveness.
+ &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
+ debug_assert!(false, "BroadcastChannelUpdate events should be queued in pending_broadcast_messages");
+ false
+ },
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
}
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None);
if !init_msg.features.supports_static_remote_key() {
log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
let mut peer_state = e.get().lock().unwrap();
peer_state.latest_features = init_msg.features.clone();
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
if inbound_peer_limited &&
Self::unfunded_channel_count(&*peer_state, best_block_height) ==
peer_state.channel_by_id.len()
for (_, phase) in peer_state.channel_by_id.iter_mut() {
match phase {
ChannelPhase::Funded(chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_channel_reestablish(&&logger),
});
}
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_open_channel_v2(self.chain_hash),
+ });
+ },
+
ChannelPhase::UnfundedInboundV1(_) => {
// Since unfunded inbound channel maps are cleared upon disconnecting a peer,
// they are not persisted and won't be recovered after a crash.
// Therefore, they shouldn't exist at this point.
debug_assert!(false);
}
+
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(_) => {
+ // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
+ // they are not persisted and won't be recovered after a crash.
+ // Therefore, they shouldn't exist at this point.
+ debug_assert!(false);
+ },
}
}
}
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-
match &msg.data as &str {
"cannot co-op close channel w/ active htlcs"|
"link failed to shutdown" =>
// We're not going to bother handling this in a sensible way, instead simply
// repeating the Shutdown message on repeat until morale improves.
if !msg.channel_id.is_zero() {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if peer_state_mutex_opt.is_none() { return; }
- let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
- if let Some(msg) = chan.get_outbound_shutdown() {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
- peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: *counterparty_node_id,
- action: msgs::ErrorAction::SendWarningMessage {
- msg: msgs::WarningMessage {
- channel_id: msg.channel_id,
- data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
- },
- log_level: Level::Trace,
+ PersistenceNotifierGuard::optionally_notify(
+ self,
+ || -> NotifyOption {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
+ let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+ if let Some(msg) = chan.get_outbound_shutdown() {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: *counterparty_node_id,
+ action: msgs::ErrorAction::SendWarningMessage {
+ msg: msgs::WarningMessage {
+ channel_id: msg.channel_id,
+ data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+ },
+ log_level: Level::Trace,
+ }
+ });
+ // This can happen in a fairly tight loop, so we absolutely cannot trigger
+ // a `ChannelManager` write here.
+ return NotifyOption::SkipPersistHandleEvents;
}
- });
- }
+ NotifyOption::SkipPersistNoEvents
+ }
+ );
}
return;
}
_ => {}
}
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
if msg.channel_id.is_zero() {
let channel_ids: Vec<ChannelId> = {
let per_peer_state = self.per_peer_state.read().unwrap();
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
- node_id: *counterparty_node_id,
- msg,
- });
- return;
- }
+ match peer_state.channel_by_id.get_mut(&msg.channel_id) {
+ Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
+ }
+ },
+ #[cfg(any(dual_funding, splicing))]
+ Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
+ }
+ },
+ None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
+ #[cfg(any(dual_funding, splicing))]
+ Some(ChannelPhase::UnfundedInboundV2(_)) => (),
}
}
R::Target: Router,
L::Target: Logger,
{
- fn handle_message(&self, message: OffersMessage) -> Option<OffersMessage> {
+ fn handle_message(&self, message: OffersMessage, responder: Option<Responder>) -> ResponseInstruction<OffersMessage> {
let secp_ctx = &self.secp_ctx;
let expanded_key = &self.inbound_payment_key;
match message {
OffersMessage::InvoiceRequest(invoice_request) => {
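+ // An invoice request can only be answered over a reply path; without a responder
+ // there is nowhere to send the invoice, so drop the message.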
+ let responder = match responder {
+ Some(responder) => responder,
+ None => return ResponseInstruction::NoResponse,
+ };
let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
&invoice_request
) {
Ok(amount_msats) => amount_msats,
- Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
+ Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())),
};
let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
Ok(invoice_request) => invoice_request,
Err(()) => {
let error = Bolt12SemanticError::InvalidMetadata;
- return Some(OffersMessage::InvoiceError(error.into()));
+ return responder.respond(OffersMessage::InvoiceError(error.into()));
},
};
Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
Err(()) => {
let error = Bolt12SemanticError::InvalidAmount;
- return Some(OffersMessage::InvoiceError(error.into()));
+ return responder.respond(OffersMessage::InvoiceError(error.into()));
},
};
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: invoice_request.offer_id,
+ invoice_request: invoice_request.fields(),
+ });
let payment_paths = match self.create_blinded_payment_paths(
- amount_msats, payment_secret
+ amount_msats, payment_secret, payment_context
) {
Ok(payment_paths) => payment_paths,
Err(()) => {
let error = Bolt12SemanticError::MissingPaths;
- return Some(OffersMessage::InvoiceError(error.into()));
+ return responder.respond(OffersMessage::InvoiceError(error.into()));
},
};
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
- if invoice_request.keys.is_some() {
+ let response = if invoice_request.keys.is_some() {
#[cfg(feature = "std")]
let builder = invoice_request.respond_using_derived_keys(
payment_paths, payment_hash
let builder = invoice_request.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at
);
- match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
- Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
- Err(error) => Some(OffersMessage::InvoiceError(error.into())),
- }
+ builder
+ .map(InvoiceBuilder::<DerivedSigningPubkey>::from)
+ .and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
+ .map_err(InvoiceError::from)
} else {
#[cfg(feature = "std")]
let builder = invoice_request.respond_with(payment_paths, payment_hash);
let builder = invoice_request.respond_with_no_std(
payment_paths, payment_hash, created_at
);
- let response = builder.and_then(|builder| builder.allow_mpp().build())
- .map_err(|e| OffersMessage::InvoiceError(e.into()))
- .and_then(|invoice|
- match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
- Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
- Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed signing invoice".to_string())
- )),
- Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed invoice signature verification".to_string())
- )),
- });
- match response {
- Ok(invoice) => Some(invoice),
- Err(error) => Some(error),
- }
+ builder
+ .map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
+ .and_then(|builder| builder.allow_mpp().build())
+ .map_err(InvoiceError::from)
+ .and_then(|invoice| {
+ #[cfg(c_bindings)]
+ let mut invoice = invoice;
+ invoice
+ .sign(|invoice: &UnsignedBolt12Invoice|
+ self.node_signer.sign_bolt12_invoice(invoice)
+ )
+ .map_err(InvoiceError::from)
+ })
+ };
+
+ match response {
+ Ok(invoice) => return responder.respond(OffersMessage::Invoice(invoice)),
+ Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())),
}
},
OffersMessage::Invoice(invoice) => {
- match invoice.verify(expanded_key, secp_ctx) {
- Err(()) => {
- Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
- },
- Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
- Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
- },
- Ok(payment_id) => {
- if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
- log_trace!(self.logger, "Failed paying invoice: {:?}", e);
- Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
+ let response = invoice
+ .verify(expanded_key, secp_ctx)
+ .map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned()))
+ .and_then(|payment_id| {
+ let features = self.bolt12_invoice_features();
+ if invoice.invoice_features().requires_unknown_bits_from(&features) {
+ Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
} else {
- None
+ self.send_payment_for_bolt12_invoice(&invoice, payment_id)
+ .map_err(|e| {
+ log_trace!(self.logger, "Failed paying invoice: {:?}", e);
+ InvoiceError::from_string(format!("{:?}", e))
+ })
}
- },
+ });
+
+ match (responder, response) {
+ (Some(responder), Err(e)) => responder.respond(OffersMessage::InvoiceError(e)),
+ (None, Err(_)) => {
+ log_trace!(
+ self.logger,
+ "A response was generated, but there is no reply_path specified for sending the response."
+ );
+ return ResponseInstruction::NoResponse;
+ }
+ _ => return ResponseInstruction::NoResponse,
}
},
OffersMessage::InvoiceError(invoice_error) => {
log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
- None
+ return ResponseInstruction::NoResponse;
},
}
}
}
}
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
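+ // Maps a `short_channel_id` to the counterparty's `PublicKey` on that channel, e.g. so
+ // that onion message forwarding can resolve the next hop of a blinded path referenced
+ // by SCID.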
+ fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
+ self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
+ }
+}
+
/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl_writeable_tlv_based!(CounterpartyForwardingInfo, {
- (2, fee_base_msat, required),
- (4, fee_proportional_millionths, required),
- (6, cltv_expiry_delta, required),
-});
-
-impl_writeable_tlv_based!(ChannelCounterparty, {
- (2, node_id, required),
- (4, features, required),
- (6, unspendable_punishment_reserve, required),
- (8, forwarding_info, option),
- (9, outbound_htlc_minimum_msat, option),
- (11, outbound_htlc_maximum_msat, option),
-});
-
-impl Writeable for ChannelDetails {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
- // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
- let user_channel_id_low = self.user_channel_id as u64;
- let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64);
- write_tlv_fields!(writer, {
- (1, self.inbound_scid_alias, option),
- (2, self.channel_id, required),
- (3, self.channel_type, option),
- (4, self.counterparty, required),
- (5, self.outbound_scid_alias, option),
- (6, self.funding_txo, option),
- (7, self.config, option),
- (8, self.short_channel_id, option),
- (9, self.confirmations, option),
- (10, self.channel_value_satoshis, required),
- (12, self.unspendable_punishment_reserve, option),
- (14, user_channel_id_low, required),
- (16, self.balance_msat, required),
- (18, self.outbound_capacity_msat, required),
- (19, self.next_outbound_htlc_limit_msat, required),
- (20, self.inbound_capacity_msat, required),
- (21, self.next_outbound_htlc_minimum_msat, required),
- (22, self.confirmations_required, option),
- (24, self.force_close_spend_delay, option),
- (26, self.is_outbound, required),
- (28, self.is_channel_ready, required),
- (30, self.is_usable, required),
- (32, self.is_public, required),
- (33, self.inbound_htlc_minimum_msat, option),
- (35, self.inbound_htlc_maximum_msat, option),
- (37, user_channel_id_high_opt, option),
- (39, self.feerate_sat_per_1000_weight, option),
- (41, self.channel_shutdown_state, option),
- (43, self.pending_inbound_htlcs, optional_vec),
- (45, self.pending_outbound_htlcs, optional_vec),
- });
- Ok(())
- }
-}
-
-impl Readable for ChannelDetails {
- fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- _init_and_read_len_prefixed_tlv_fields!(reader, {
- (1, inbound_scid_alias, option),
- (2, channel_id, required),
- (3, channel_type, option),
- (4, counterparty, required),
- (5, outbound_scid_alias, option),
- (6, funding_txo, option),
- (7, config, option),
- (8, short_channel_id, option),
- (9, confirmations, option),
- (10, channel_value_satoshis, required),
- (12, unspendable_punishment_reserve, option),
- (14, user_channel_id_low, required),
- (16, balance_msat, required),
- (18, outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
- (20, inbound_capacity_msat, required),
- (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
- (22, confirmations_required, option),
- (24, force_close_spend_delay, option),
- (26, is_outbound, required),
- (28, is_channel_ready, required),
- (30, is_usable, required),
- (32, is_public, required),
- (33, inbound_htlc_minimum_msat, option),
- (35, inbound_htlc_maximum_msat, option),
- (37, user_channel_id_high_opt, option),
- (39, feerate_sat_per_1000_weight, option),
- (41, channel_shutdown_state, option),
- (43, pending_inbound_htlcs, optional_vec),
- (45, pending_outbound_htlcs, optional_vec),
- });
-
- // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
- // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
- let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
- let user_channel_id = user_channel_id_low as u128 +
- ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
-
- Ok(Self {
- inbound_scid_alias,
- channel_id: channel_id.0.unwrap(),
- channel_type,
- counterparty: counterparty.0.unwrap(),
- outbound_scid_alias,
- funding_txo,
- config,
- short_channel_id,
- channel_value_satoshis: channel_value_satoshis.0.unwrap(),
- unspendable_punishment_reserve,
- user_channel_id,
- balance_msat: balance_msat.0.unwrap(),
- outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
- next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
- next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
- inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
- confirmations_required,
- confirmations,
- force_close_spend_delay,
- is_outbound: is_outbound.0.unwrap(),
- is_channel_ready: is_channel_ready.0.unwrap(),
- is_usable: is_usable.0.unwrap(),
- is_public: is_public.0.unwrap(),
- inbound_htlc_minimum_msat,
- inbound_htlc_maximum_msat,
- feerate_sat_per_1000_weight,
- channel_shutdown_state,
- pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
- pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
- })
- }
-}
-
impl_writeable_tlv_based!(PhantomRouteHints, {
(2, channels, required_vec),
(4, phantom_scid, required),
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
(7, requires_blinded_error, (default_value, false)),
+ (9, payment_context, option),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
+ (1, requires_blinded_error, (default_value, false)),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option), // Added in 0.0.116
impl Writeable for ClaimableHTLC {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let (payment_data, keysend_preimage) = match &self.onion_payload {
- OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+ OnionPayload::Invoice { _legacy_hop_data } => {
+ (_legacy_hop_data.as_ref(), None)
+ },
OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
};
write_tlv_fields!(writer, {
self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
- best_block.height().write(writer)?;
- best_block.block_hash().write(writer)?;
+ best_block.height.write(writer)?;
+ best_block.block_hash.write(writer)?;
}
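+ // Take the `per_peer_state` write lock up front so the set of peers and their channels
+ // cannot change while we serialize.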
+ let per_peer_state = self.per_peer_state.write().unwrap();
+
let mut serializable_peer_count: u64 = 0;
{
- let per_peer_state = self.per_peer_state.read().unwrap();
let mut number_of_funded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
}
}
- let per_peer_state = self.per_peer_state.write().unwrap();
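+ // Serialize the pending-decode `update_add_htlc`s as an `Option`, omitting the TLV
+ // entirely when the map is empty.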
+ let mut decode_update_add_htlcs_opt = None;
+ let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+ if !decode_update_add_htlcs.is_empty() {
+ decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
+ }
let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
let claimable_payments = self.claimable_payments.lock().unwrap();
(10, in_flight_monitor_updates, option),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
+ (14, decode_update_add_htlcs_opt, option),
});
Ok(())
}
}
-impl_writeable_tlv_based_enum!(ChannelShutdownState,
- (0, NotShuttingDown) => {},
- (2, ShutdownInitiated) => {},
- (4, ResolvingHTLCs) => {},
- (6, NegotiatingClosingFee) => {},
- (8, ShutdownComplete) => {}, ;
-);
-
/// Arguments for the creation of a ChannelManager that are not deserialized.
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
- let logger = WithChannelContext::from(&args.logger, &channel.context);
+ let logger = WithChannelContext::from(&args.logger, &channel.context, None);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
// claim update ChannelMonitor updates were persisted prior to persisting
// the ChannelMonitor update for the forward leg, so attempting to fail the
// backwards leg of the HTLC will simply be rejected.
+ let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
log_info!(logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
&payment_hash, &channel.context.channel_id());
}
}
} else {
- log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+ channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
+ log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
- monitor.get_latest_update_id());
+ monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
- let logger = WithChannelMonitor::from(&args.logger, monitor);
+ let logger = WithChannelMonitor::from(&args.logger, monitor, None);
let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
&channel_id);
let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
let mut events_override = None;
let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
+ let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(10, in_flight_monitor_updates, option),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
+ (14, decode_update_add_htlcs, option),
});
+ let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
}
let peer_state = &mut *peer_state_lock;
for phase in peer_state.channel_by_id.values() {
if let ChannelPhase::Funded(chan) = phase {
- let logger = WithChannelContext::from(&args.logger, &chan.context);
+ let logger = WithChannelContext::from(&args.logger, &chan.context, None);
// Channels that were persisted have to be funded, otherwise they should have been
// discarded.
}
}
if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
- // If the channel is ahead of the monitor, return InvalidValue:
+ // If the channel is ahead of the monitor, return DangerousValue:
log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
- return Err(DecodeError::InvalidValue);
+ return Err(DecodeError::DangerousValue);
}
} else {
// We shouldn't have persisted (or read) any unfunded channel types so none should have been
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
- let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
return Err(DecodeError::InvalidValue);
}
}
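For callers, the practical difference between the two error paths above is worth spelling out: `DecodeError::InvalidValue` means the stored data itself is unusable, while the new `DecodeError::DangerousValue` means the data parsed correctly but the `ChannelManager` is stale relative to a `ChannelMonitor`. A minimal sketch of how startup code might gate on that distinction, assuming the `Result` comes from deserializing the `ChannelManager` as described above (the recovery policy shown is illustrative, not part of this change):

use crate::ln::msgs::DecodeError;

fn gate_startup(read_result: Result<(), DecodeError>) -> Result<(), DecodeError> {
	match read_result {
		// Manager and monitors are consistent; safe to resume operation.
		Ok(()) => Ok(()),
		Err(DecodeError::DangerousValue) => {
			// The persisted ChannelManager is behind at least one
			// ChannelMonitor: running from it could risk funds, so restore a
			// fresher ChannelManager snapshot before starting.
			Err(DecodeError::DangerousValue)
		},
		// Anything else indicates corrupt or unreadable data.
		Err(e) => Err(e),
	}
}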
for (_, monitor) in args.channel_monitors.iter() {
let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
if counterparty_opt.is_none() {
- let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
+ let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
if path.hops.is_empty() {
log_error!(logger, "Got an empty path for a pending payment");
}
}
for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+ let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
match htlc_source {
HTLCSource::PreviousHopData(prev_hop_data) => {
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
// still have an entry for this HTLC in `forward_htlcs` or
// `pending_intercepted_htlcs`, we were apparently not persisted after
// the monitor was when forwarding the payment.
+ decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
+ update_add_htlcs.retain(|update_add_htlc| {
+ let matches = *scid == prev_hop_data.short_channel_id &&
+ update_add_htlc.htlc_id == prev_hop_data.htlc_id;
+ if matches {
+ log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
+ &htlc.payment_hash, &monitor.channel_id());
+ }
+ !matches
+ });
+ !update_add_htlcs.is_empty()
+ });
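The nested `retain` above does two jobs at once: it drops the forwarded HTLC from its per-SCID bucket and then prunes any bucket left empty, so `decode_update_add_htlcs` never accumulates empty vectors. The same shape in isolation, with illustrative types:

use std::collections::HashMap;

fn prune_htlc(map: &mut HashMap<u64, Vec<u64>>, scid: u64, htlc_id: u64) {
	map.retain(|map_scid, htlc_ids| {
		// Drop the matching entry from this bucket...
		htlc_ids.retain(|id| !(*map_scid == scid && *id == htlc_id));
		// ...then drop the bucket itself if nothing remains.
		!htlc_ids.is_empty()
	});
}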
forward_htlcs.retain(|_, forwards| {
forwards.retain(|forward| {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
}
}
- if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
+ if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
// If we have pending HTLCs to forward, assume we either dropped a
// `PendingHTLCsForwardable` or the user received it but never processed it as they
// shut down before the timer hit. Either way, set the time_forwardable to a small
// constant to trigger a forwards process quickly.
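On the consumer side, that small `time_forwardable` surfaces in the `Event::PendingHTLCsForwardable` handed to the event handler, which should wait at least that long and then drive the forwarding pass. A hedged sketch of such a handler; `schedule` is a hypothetical helper standing in for whatever timer the application uses:

use core::time::Duration;
use crate::events::Event;

fn on_event(event: &Event) {
	if let Event::PendingHTLCsForwardable { time_forwardable } = event {
		// Wait at least `time_forwardable` (ideally with added jitter to
		// obscure HTLC timing), then drive the forwarding pass, e.g.:
		// schedule(*time_forwardable, || channel_manager.process_pending_htlc_forwards());
		let _: Duration = *time_forwardable;
	}
}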
let purpose = match &htlcs[0].onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => {
if let Some(hop_data) = _legacy_hop_data {
- events::PaymentPurpose::InvoicePayment {
+ events::PaymentPurpose::Bolt11InvoicePayment {
payment_preimage: match pending_inbound_payments.get(&payment_hash) {
Some(inbound_payment) => inbound_payment.payment_preimage,
None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
let peer_state = &mut *peer_state_lock;
for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
if let ChannelPhase::Funded(chan) = phase {
- let logger = WithChannelContext::from(&args.logger, &chan.context);
+ let logger = WithChannelContext::from(&args.logger, &chan.context, None);
if chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
- let logger = WithChannelContext::from(&args.logger, &channel.context);
+ let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
}
}
amount_msat: claimable_amt_msat,
htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
+ onion_fields: payment.onion_fields,
}, None));
}
}
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
if let Some(peer_state) = per_peer_state.get(&node_id) {
for (channel_id, actions) in monitor_update_blocked_actions.iter() {
- let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
+ let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
}
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
} else {
- log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
+ log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
return Err(DecodeError::InvalidValue);
}
}
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
+ decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
outpoint_to_peer: Mutex::new(outpoint_to_peer),
pending_offers_messages: Mutex::new(Vec::new()),
+ pending_broadcast_messages: Mutex::new(Vec::new()),
+
entropy_source: args.entropy_source,
node_signer: args.node_signer,
signer_provider: args.signer_provider,
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
- downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
+ downstream_closed, true, downstream_node_id, downstream_funding,
+ downstream_channel_id, None
+ );
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use core::sync::atomic::Ordering;
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
- use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
- use crate::ln::ChannelId;
+ use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, ErrorAction};
}
}
+ #[test]
+ fn test_channel_update_cached() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+
+ // Confirm that the channel_update was not sent immediately to nodes[1] but was cached.
+ let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_1_events.len(), 0);
+
+ {
+ // Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages
+ let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+ assert_eq!(pending_broadcast_messages.len(), 1);
+ }
+
+ // Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_0_events.len(), 0);
+
+ // Now we reconnect to a peer
+ nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+ features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+
+ // Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
+ let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_0_events.len(), 1);
+ match &node_0_events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => (),
+ _ => panic!("Unexpected event"),
+ }
+ {
+ // Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages
+ let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+ assert_eq!(pending_broadcast_messages.len(), 0);
+ }
+ }
+
#[test]
fn test_drop_disconnected_peers_when_removing_channels() {
let chanmon_cfgs = create_chanmon_cfgs(2);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-
- nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let channel_id = ChannelId::from_bytes([4; 32]);
let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
let intercept_id = InterceptId([0; 32]);
+ let error_message = "Channel force-closed";
// Test the API functions.
check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
- check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+ check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
- check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+ check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
// Dummy values
let channel_id = ChannelId::from_bytes([4; 32]);
+ let error_message = "Channel force-closed";
// Test the API functions.
check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
- check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+ check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
- check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+ check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
check_added_monitors!(nodes[0], 1);
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
// it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// Of course, however, outbound channels are always allowed
nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
// rejected.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// but we can still open an outbound channel.
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
// but even with such an outbound channel, additional inbound channels will still fail.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
}
#[test]
_ => panic!("Unexpected event"),
}
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// If we try to accept a channel from another peer non-0conf it will fail.
_ => panic!("Unexpected event"),
}
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// ...however if we accept the same channel 0conf it should work just fine.
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
};
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
}),
custom_tlvs: Vec::new(),
};
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
let node = create_network(1, &node_cfg, &node_chanmgr);
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
sender_intended_htlc_amt_msat: 100,
cltv_expiry_height: 22,
anchors_config.manually_accept_inbound_channels = true;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let error_message = "Channel force-closed";
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+ assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
}
_ => panic!("Unexpected event"),
}
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+ assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
// Since nodes[1] should not have accepted the channel, it should
// not have generated any events.
let user_config = test_default_channel_config();
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
+ let error_message = "Channel force-closed";
// Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
use crate::util::test_utils;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};
+ use bitcoin::amount::Amount;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::{Transaction, TxOut};
+ use bitcoin::transaction::Version;
use crate::sync::{Arc, Mutex, RwLock};
let tx;
if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
- tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
- value: 8_000_000, script_pubkey: output_script,
+ tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ value: Amount::from_sat(8_000_000), script_pubkey: output_script,
}]};
node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
} else { panic!(); }
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);