X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=1b34ab824d2174c576ced34d35b01d5e209e0b74;hb=af6c559cc98912e1cd3af10f660575ce2b4739f1;hp=3ef57c5b87f024e8861ecabf7226c4f6eae19ad7;hpb=1ac53ed02bf26520d3b1a2c3c0e90c8691c83099;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 3ef57c5b..1b34ab82 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -19,7 +19,7 @@
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::{genesis_block, ChainHash};
+use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::network::constants::Network;
 use bitcoin::hashes::Hash;
@@ -30,6 +30,8 @@
 use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::{LockTime, secp256k1, Sequence};
+use crate::blinded_path::BlindedPath;
+use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -41,7 +43,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
-use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
@@ -53,8 +55,15 @@ use crate::ln::onion_utils::HTLCFailReason;
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
+use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
 use crate::ln::wire::Encode;
+use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
+use crate::offers::invoice_error::InvoiceError;
+use crate::offers::merkle::SignError;
+use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
+use crate::offers::parse::Bolt12SemanticError;
+use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
@@ -64,7 +73,7 @@ use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, Maybe
 use crate::util::logger::{Level, Logger};
 use crate::util::errors::APIError;
-use alloc::collections::BTreeMap;
+use alloc::collections::{btree_map, BTreeMap};
 use crate::io;
 use crate::prelude::*;
@@ -77,7 +86,7 @@ use core::time::Duration;
 use core::ops::Deref;
 // Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+pub(crate) use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
 use crate::ln::script::ShutdownScript;
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
@@ -447,16 +456,17 @@ impl MsgHandleErrInternal {
 	}
 	#[inline]
 	fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option, channel_capacity: u64) -> Self {
+		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
+		let action = if let (Some(_), ..) = &shutdown_res {
+			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
+			// should disconnect our peer such that we force them to broadcast their latest
+			// commitment upon reconnecting.
+			msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
+		} else {
+			msgs::ErrorAction::SendErrorMessage { msg: err_msg }
+		};
 		Self {
-			err: LightningError {
-				err: err.clone(),
-				action: msgs::ErrorAction::SendErrorMessage {
-					msg: msgs::ErrorMessage {
-						channel_id,
-						data: err
-					},
-				},
-			},
+			err: LightningError { err, action },
 			chan_id: Some((channel_id, user_channel_id)),
 			shutdown_finish: Some((shutdown_res, channel_update)),
 			channel_capacity: Some(channel_capacity)
@@ -563,6 +573,7 @@ struct ClaimablePayments {
 /// usually because we're running pre-full-init. They are handled immediately once we detect we are
 /// running normally, and specifically must be processed before any other non-background
 /// [`ChannelMonitorUpdate`]s are applied.
+#[derive(Debug)]
 enum BackgroundEvent {
 	/// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel.
 	/// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as the
@@ -615,10 +626,34 @@ pub(crate) enum MonitorUpdateCompletionAction {
 		event: events::Event,
 		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
 	},
+	/// Indicates we should immediately resume the operation of another channel, unless there is
+	/// some other reason why the channel is blocked. In practice this simply means immediately
+	/// removing the [`RAAMonitorUpdateBlockingAction`] provided from the blocking set.
+	///
+	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+	/// from completing a monitor update which removes the payment preimage until the inbound edge
+	/// completes a monitor update containing the payment preimage. However, we use this variant
+	/// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
+	/// fact duplicative and we simply want to resume the outbound edge channel immediately.
+	///
+	/// This variant should thus never be written to disk, as it is processed inline rather than
+	/// stored for later processing.
+	FreeOtherChannelImmediately {
+		downstream_counterparty_node_id: PublicKey,
+		downstream_funding_outpoint: OutPoint,
+		blocking_action: RAAMonitorUpdateBlockingAction,
+	},
 }
 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 	(0, PaymentClaimed) => { (0, payment_hash, required) },
 	// Note that FreeOtherChannelImmediately should never be written - we were supposed to free
However, for simplicity we implement read/write here. + (1, FreeOtherChannelImmediately) => { + (0, downstream_counterparty_node_id, required), + (2, downstream_funding_outpoint, required), + (4, blocking_action, required), + }, (2, EmitEventAndFreeOtherChannel) => { (0, event, upgradable_required), // LDK prior to 0.0.116 did not have this field as the monitor update application order was @@ -792,7 +827,8 @@ struct PendingInboundPayment { /// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types /// of [`KeysManager`] and [`DefaultRouter`]. /// -/// This is not exported to bindings users as Arcs don't make sense in bindings +/// This is not exported to bindings users as type aliases aren't supported in most languages. +#[cfg(not(c_bindings))] pub type SimpleArcChannelManager = ChannelManager< Arc, Arc, @@ -803,9 +839,7 @@ pub type SimpleArcChannelManager = ChannelManager< Arc>>, Arc, - Arc>>, Arc>>>, - ProbabilisticScoringFeeParameters, - ProbabilisticScorer>>, Arc>, + Arc>>, Arc>>>, >>, Arc >; @@ -820,7 +854,8 @@ pub type SimpleArcChannelManager = ChannelManager< /// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types /// of [`KeysManager`] and [`DefaultRouter`]. /// -/// This is not exported to bindings users as Arcs don't make sense in bindings +/// This is not exported to bindings users as type aliases aren't supported in most languages. +#[cfg(not(c_bindings))] pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager< &'a M, @@ -832,14 +867,15 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = &'e DefaultRouter< &'f NetworkGraph<&'g L>, &'g L, - &'h Mutex, &'g L>>, - ProbabilisticScoringFeeParameters, - ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L> + &'h RwLock, &'g L>>, >, &'g L >; /// A trivial trait which describes any [`ChannelManager`]. +/// +/// This is not exported to bindings users as general cover traits aren't useful in other +/// languages. pub trait AChannelManager { /// A type implementing [`chain::Watch`]. 
type Watch: chain::Watch + ?Sized; @@ -975,6 +1011,8 @@ where // // Lock order tree: // +// `pending_offers_messages` +// // `total_consistency_lock` // | // |__`forward_htlcs` @@ -982,26 +1020,26 @@ where // | |__`pending_intercepted_htlcs` // | // |__`per_peer_state` -// | | -// | |__`pending_inbound_payments` -// | | -// | |__`claimable_payments` -// | | -// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds -// | | -// | |__`peer_state` -// | | -// | |__`id_to_peer` -// | | -// | |__`short_to_chan_info` -// | | -// | |__`outbound_scid_aliases` -// | | -// | |__`best_block` -// | | -// | |__`pending_events` -// | | -// | |__`pending_background_events` +// | +// |__`pending_inbound_payments` +// | +// |__`claimable_payments` +// | +// |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds +// | +// |__`peer_state` +// | +// |__`id_to_peer` +// | +// |__`short_to_chan_info` +// | +// |__`outbound_scid_aliases` +// | +// |__`best_block` +// | +// |__`pending_events` +// | +// |__`pending_background_events` // pub struct ChannelManager where @@ -1015,7 +1053,7 @@ where L::Target: Logger, { default_configuration: UserConfig, - genesis_hash: BlockHash, + chain_hash: ChainHash, fee_estimator: LowerBoundedFeeEstimator, chain_monitor: M, tx_broadcaster: T, @@ -1201,12 +1239,20 @@ where /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the /// Notifier the lock contains sends out a notification when the lock is released. total_consistency_lock: RwLock<()>, + /// Tracks the progress of channels going through batch funding by whether funding_signed was + /// received and the monitor has been persisted. + /// + /// This information does not need to be persisted as funding nodes can forget + /// unfunded channels upon disconnection. + funding_batch_states: Mutex>>, background_events_processed_since_startup: AtomicBool, event_persist_notifier: Notifier, needs_persist_flag: AtomicBool, + pending_offers_messages: Mutex>>, + entropy_source: ES, node_signer: NS, signer_provider: SP, @@ -1442,12 +1488,6 @@ pub struct ChannelCounterparty { } /// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`] -/// -/// Balances of a channel are available through [`ChainMonitor::get_claimable_balances`] and -/// [`ChannelMonitor::get_claimable_balances`], calculated with respect to the corresponding on-chain -/// transactions. -/// -/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances #[derive(Clone, Debug, PartialEq)] pub struct ChannelDetails { /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes, @@ -1529,11 +1569,24 @@ pub struct ChannelDetails { /// /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115. pub feerate_sat_per_1000_weight: Option, + /// Our total balance. This is the amount we would get if we close the channel. + /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this + /// amount is not likely to be recoverable on close. + /// + /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose + /// balance is not available for inclusion in new outbound HTLCs). This further does not include + /// any pending outgoing HTLCs which are awaiting some other resolution to be sent. + /// This does not consider any on-chain fees. 
+ /// + /// See also [`ChannelDetails::outbound_capacity_msat`] + pub balance_msat: u64, /// The available outbound capacity for sending HTLCs to the remote peer. This does not include /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not /// available for inclusion in new outbound HTLCs). This further does not include any pending /// outgoing HTLCs which are awaiting some other resolution to be sent. /// + /// See also [`ChannelDetails::balance_msat`] + /// /// This value is not exact. Due to various in-flight changes, feerate changes, and our /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we /// should be able to spend nearly this amount. @@ -1543,8 +1596,8 @@ pub struct ChannelDetails { /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us /// to use a limit as close as possible to the HTLC limit we can currently send. /// - /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`] and - /// [`ChannelDetails::outbound_capacity_msat`]. + /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`], + /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`]. pub next_outbound_htlc_limit_msat: u64, /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than @@ -1674,6 +1727,7 @@ impl ChannelDetails { channel_value_satoshis: context.get_value_satoshis(), feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, + balance_msat: balance.balance_msat, inbound_capacity_msat: balance.inbound_capacity_msat, outbound_capacity_msat: balance.outbound_capacity_msat, next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, @@ -1788,7 +1842,7 @@ macro_rules! handle_error { let mut msg_events = Vec::with_capacity(2); if let Some((shutdown_res, update_option)) = shutdown_finish { - $self.finish_force_close_channel(shutdown_res); + $self.finish_close_channel(shutdown_res); if let Some(update) = update_option { msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -1995,7 +2049,7 @@ macro_rules! emit_channel_ready_event { macro_rules! handle_monitor_update_completion { ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { let mut updates = $chan.monitor_updating_restored(&$self.logger, - &$self.node_signer, $self.genesis_hash, &$self.default_configuration, + &$self.node_signer, $self.chain_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); let counterparty_node_id = $chan.context.get_counterparty_node_id(); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { @@ -2025,9 +2079,54 @@ macro_rules! handle_monitor_update_completion { } let channel_id = $chan.context.channel_id(); + let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); + // If the channel belongs to a batch funding transaction, the progress of the batch + // should be updated as we have received funding_signed and persisted the monitor. 
+ if let Some(txid) = unbroadcasted_batch_funding_txid { + let mut funding_batch_states = $self.funding_batch_states.lock().unwrap(); + let mut batch_completed = false; + if let Some(batch_state) = funding_batch_states.get_mut(&txid) { + let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| ( + *chan_id == channel_id && + *pubkey == counterparty_node_id + )); + if let Some(channel_state) = channel_state { + channel_state.2 = true; + } else { + debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update"); + } + batch_completed = batch_state.iter().all(|(_, _, completed)| *completed); + } else { + debug_assert!(false, "Missing batch state for channel which completed initial monitor update"); + } + + // When all channels in a batched funding transaction have become ready, it is not necessary + // to track the progress of the batch anymore and the state of the channels can be updated. + if batch_completed { + let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); + let per_peer_state = $self.per_peer_state.read().unwrap(); + let mut batch_funding_tx = None; + for (channel_id, counterparty_node_id, _) in removed_batch_state { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { + batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding()); + chan.set_batch_ready(); + let mut pending_events = $self.pending_events.lock().unwrap(); + emit_channel_pending_event!(pending_events, chan); + } + } + } + if let Some(tx) = batch_funding_tx { + log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.txid()); + $self.tx_broadcaster.broadcast_transactions(&[&tx]); + } + } + } + $self.handle_monitor_update_completion_actions(update_actions); if let Some(forwards) = htlc_forwards { @@ -2196,7 +2295,7 @@ where let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); ChannelManager { default_configuration: config.clone(), - genesis_hash: genesis_block(params.network).header.block_hash(), + chain_hash: ChainHash::using_genesis_block(params.network), fee_estimator: LowerBoundedFeeEstimator::new(fee_est), chain_monitor, tx_broadcaster, @@ -2230,9 +2329,11 @@ where pending_background_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), background_events_processed_since_startup: AtomicBool::new(false), - event_persist_notifier: Notifier::new(), needs_persist_flag: AtomicBool::new(false), + funding_batch_states: Mutex::new(BTreeMap::new()), + + pending_offers_messages: Mutex::new(Vec::new()), entropy_source, node_signer, @@ -2255,7 +2356,7 @@ where if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias outbound_scid_alias += 1; } else { - outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); + outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source); } if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) { break; @@ -2325,7 +2426,7 @@ where }, } }; - let res = channel.get_open_channel(self.genesis_hash.clone()); + let res = channel.get_open_channel(self.chain_hash); let 
temporary_channel_id = channel.context.channel_id(); match peer_state.channel_by_id.entry(temporary_channel_id) { @@ -2497,6 +2598,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; + let mut shutdown_result = None; loop { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -2511,6 +2613,7 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let funding_txo_opt = chan.context.get_funding_txo(); let their_features = &peer_state.latest_features; + let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid(); let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; failed_htlcs = htlcs; @@ -2541,6 +2644,7 @@ where }); } self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); + shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid)); } } break; @@ -2551,6 +2655,8 @@ where // it does not exist for this peer. Either way, we can attempt to force-close it. // // An appropriate error will be returned for non-existence of the channel if that's the case. + mem::drop(peer_state_lock); + mem::drop(per_peer_state); return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) }, } @@ -2562,6 +2668,10 @@ where self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } + if let Some(shutdown_result) = shutdown_result { + self.finish_close_channel(shutdown_result); + } + Ok(()) } @@ -2569,11 +2679,11 @@ where /// will be accepted on the given channel, and after additional timeout/the closing of all /// pending HTLCs, the channel will be closed on chain. /// - /// * If we are the channel initiator, we will pay between our [`Background`] and - /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee - /// estimate. + /// * If we are the channel initiator, we will pay between our [`ChannelCloseMinimum`] and + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`] + /// fee estimate. /// * If our counterparty is the channel initiator, we will require a channel closing - /// transaction feerate of at least our [`Background`] feerate or the feerate which + /// transaction feerate of at least our [`ChannelCloseMinimum`] feerate or the feerate which /// would appear on a force-closure transaction, whichever is lower. We will allow our /// counterparty to pay as much fee as they'd like, however. /// @@ -2585,8 +2695,8 @@ where /// channel. 
/// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis - /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background - /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum + /// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, None, None) @@ -2600,8 +2710,8 @@ where /// the channel being closed or not: /// * If we are the channel initiator, we will pay at least this feerate on the closing /// transaction. The upper-bound is set by - /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee - /// estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`] + /// fee estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which /// will appear on a force-closure transaction, whichever is lower). @@ -2619,21 +2729,20 @@ where /// channel. /// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis - /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background - /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, shutdown_script: Option) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script) } - fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { + fn finish_close_channel(&self, shutdown_res: ShutdownResult) { debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); #[cfg(debug_assertions)] for (_, peer) in self.per_peer_state.read().unwrap().iter() { debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); } - let (monitor_update_option, mut failed_htlcs) = shutdown_res; + let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res; log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; @@ -2648,6 +2757,31 @@ where // ignore the result here. 
let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update); } + let mut shutdown_results = Vec::new(); + if let Some(txid) = unbroadcasted_batch_funding_txid { + let mut funding_batch_states = self.funding_batch_states.lock().unwrap(); + let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten(); + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut has_uncompleted_channel = None; + for (channel_id, counterparty_node_id, state) in affected_channels { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { + update_maps_on_chan_removal!(self, &chan.context()); + self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure); + shutdown_results.push(chan.context_mut().force_shutdown(false)); + } + } + has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); + } + debug_assert!( + has_uncompleted_channel.unwrap_or(true), + "Closing a batch where all channels have completed initial monitor update", + ); + } + for shutdown_result in shutdown_results.drain(..) { + self.finish_close_channel(shutdown_result); + } } /// `peer_msg` should be set when we receive a message from a peer, but not set when the @@ -2672,11 +2806,11 @@ where mem::drop(per_peer_state); match chan_phase { ChannelPhase::Funded(mut chan) => { - self.finish_force_close_channel(chan.context.force_shutdown(broadcast)); + self.finish_close_channel(chan.context.force_shutdown(broadcast)); (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id()) }, ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { - self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false)); + self.finish_close_channel(chan_phase.context_mut().force_shutdown(false)); // Unfunded channel has no update (None, chan_phase.context().get_counterparty_node_id()) }, @@ -2718,8 +2852,8 @@ where peer_state.pending_msg_events.push( events::MessageSendEvent::HandleError { node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() } + action: msgs::ErrorAction::DisconnectPeer { + msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }) }, } ); @@ -2844,7 +2978,7 @@ where // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). let current_height: u32 = self.best_block.read().unwrap().height(); - if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 { let mut err_data = Vec::with_capacity(12); err_data.extend_from_slice(&amt_msat.to_be_bytes()); err_data.extend_from_slice(¤t_height.to_be_bytes()); @@ -3007,8 +3141,8 @@ where // Note that this is likely a timing oracle for detecting whether an scid is a // phantom or an intercept. 
if (self.default_configuration.accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash) + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || + fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash) { None } else { @@ -3236,7 +3370,7 @@ where }; let unsigned = msgs::UnsignedChannelUpdate { - chain_hash: self.genesis_hash, + chain_hash: self.chain_hash, short_channel_id, timestamp: chan.context.get_update_time_counter(), flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), @@ -3251,7 +3385,7 @@ where // If we returned an error and the `node_signer` cannot provide a signature for whatever // reason`, we wouldn't be able to receive inbound payments through the corresponding // channel. - let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap(); + let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(unsigned.clone())).unwrap(); Ok(msgs::ChannelUpdate { signature: sig, @@ -3388,9 +3522,8 @@ where /// In general, a path may raise: /// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee, /// node public key) is specified. - /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates - /// (including due to previous monitor update failure or new permanent monitor update - /// failure). + /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available as it has been + /// closed, doesn't exist, or the peer is currently disconnected. /// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the /// relevant updates. /// @@ -3445,6 +3578,17 @@ where self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata); } + pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> { + let best_block_height = self.best_block.read().unwrap().height(); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + self.pending_outbound_payments + .send_payment_for_bolt12_invoice( + invoice, payment_id, &self.router, self.list_usable_channels(), + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, + best_block_height, &self.logger, &self.pending_events, + |args| self.send_payment_along_path(args) + ) + } /// Signals that no further attempts for the given payment should occur. Useful if you have a /// pending outbound payment with retries remaining, but wish to stop retrying the payment before @@ -3461,9 +3605,10 @@ where /// /// # Requested Invoices /// - /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the - /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying - /// it once received. The other events may only be generated once the invoice has been received. + /// In the case of paying a [`Bolt12Invoice`] via [`ChannelManager::pay_for_offer`], abandoning + /// the payment prior to receiving the invoice will result in an [`Event::InvoiceRequestFailed`] + /// and prevent any attempts at paying it once received. The other events may only be generated + /// once the invoice has been received. 
/// /// # Restart Behavior /// @@ -3537,7 +3682,7 @@ where /// /// See [`ChannelManager::send_preflight_probes`] for more information. pub fn send_spontaneous_preflight_probes( - &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32, + &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32, liquidity_limit_multiplier: Option, ) -> Result, ProbeSendFailure> { let payment_params = @@ -3644,8 +3789,9 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. - fn funding_transaction_generated_intern, &Transaction) -> Result>( - &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput + fn funding_transaction_generated_intern, &Transaction) -> Result>( + &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool, + mut find_funding_output: FundingOutput, ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -3657,7 +3803,7 @@ where Some(ChannelPhase::UnfundedOutboundV1(chan)) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; - let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger) + let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger) .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); @@ -3713,7 +3859,7 @@ where #[cfg(test)] pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> { - self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| { + self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_, tx| { Ok(OutPoint { txid: tx.txid(), index: output_index }) }) } @@ -3749,17 +3895,37 @@ where /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> { + self.batch_funding_transaction_generated(&[(temporary_channel_id, counterparty_node_id)], funding_transaction) + } + + /// Call this upon creation of a batch funding transaction for the given channels. + /// + /// Return values are identical to [`Self::funding_transaction_generated`], respective to + /// each individual channel and transaction output. + /// + /// Do NOT broadcast the funding transaction yourself. This batch funding transcaction + /// will only be broadcast when we have safely received and persisted the counterparty's + /// signature for each channel. + /// + /// If there is an error, all channels in the batch are to be considered closed. 
+ pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let mut result = Ok(()); if !funding_transaction.is_coin_base() { for inp in funding_transaction.input.iter() { if inp.witness.is_empty() { - return Err(APIError::APIMisuseError { + result = result.and(Err(APIError::APIMisuseError { err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned() - }); + })); } } } + if funding_transaction.output.len() > u16::max_value() as usize { + result = result.and(Err(APIError::APIMisuseError { + err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() + })); + } { let height = self.best_block.read().unwrap().height(); // Transactions are evaluated as final by network mempools if their locktime is strictly @@ -3767,37 +3933,93 @@ where // node might not have perfect sync about their blockchain views. Thus, if the wallet // module is ahead of LDK, only allow one more block of headroom. if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 { - return Err(APIError::APIMisuseError { + result = result.and(Err(APIError::APIMisuseError { err: "Funding transaction absolute timelock is non-final".to_owned() - }); + })); } } - self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| { - if tx.output.len() > u16::max_value() as usize { - return Err(APIError::APIMisuseError { - err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() - }); - } - let mut output_index = None; - let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); - for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { - if output_index.is_some() { + let txid = funding_transaction.txid(); + let is_batch_funding = temporary_channels.len() > 1; + let mut funding_batch_states = if is_batch_funding { + Some(self.funding_batch_states.lock().unwrap()) + } else { + None + }; + let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| { + match states.entry(txid) { + btree_map::Entry::Occupied(_) => { + result = result.clone().and(Err(APIError::APIMisuseError { + err: "Batch funding transaction with the same txid already exists".to_owned() + })); + None + }, + btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), + } + }); + for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() { + result = result.and_then(|_| self.funding_transaction_generated_intern( + temporary_channel_id, + counterparty_node_id, + funding_transaction.clone(), + is_batch_funding, + |chan, tx| { + let mut output_index = None; + let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); + for (idx, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { + if output_index.is_some() { + return Err(APIError::APIMisuseError { + err: "Multiple outputs matched the expected script and value".to_owned() + }); + } + output_index = Some(idx as u16); + } + } + if output_index.is_none() { return Err(APIError::APIMisuseError { - err: "Multiple outputs matched the expected script and 
value".to_owned() + err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned() }); } - output_index = Some(idx as u16); + let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() }; + if let Some(funding_batch_state) = funding_batch_state.as_mut() { + funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false)); + } + Ok(outpoint) + }) + ); + } + if let Err(ref e) = result { + // Remaining channels need to be removed on any error. + let e = format!("Error in transaction funding: {:?}", e); + let mut channels_to_remove = Vec::new(); + channels_to_remove.extend(funding_batch_states.as_mut() + .and_then(|states| states.remove(&txid)) + .into_iter().flatten() + .map(|(chan_id, node_id, _state)| (chan_id, node_id)) + ); + channels_to_remove.extend(temporary_channels.iter() + .map(|(&chan_id, &node_id)| (chan_id, node_id)) + ); + let mut shutdown_results = Vec::new(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (channel_id, counterparty_node_id) in channels_to_remove { + per_peer_state.get(&counterparty_node_id) + .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) + .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id)) + .map(|mut chan| { + update_maps_on_chan_removal!(self, &chan.context()); + self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() }); + shutdown_results.push(chan.context_mut().force_shutdown(false)); + }); } } - if output_index.is_none() { - return Err(APIError::APIMisuseError { - err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned() - }); + for shutdown_result in shutdown_results.drain(..) { + self.finish_close_channel(shutdown_result); } - Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() }) - }) + } + result } /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels. @@ -3840,7 +4062,7 @@ where for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { - err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id), + err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id), }); }; } @@ -3951,7 +4173,7 @@ where next_hop_channel_id, next_node_id) }), None => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}.", + err: format!("Channel with id {} not found for the passed counterparty node_id {}", next_hop_channel_id, next_node_id) }) } @@ -4087,7 +4309,7 @@ where } if let PendingHTLCRouting::Forward { onion_packet, .. 
} = routing { let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode); - if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { + if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) { let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes(); let next_hop = match onion_utils::decode_next_payment_hop( phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, @@ -4135,8 +4357,9 @@ where } } } - let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) { - Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), + let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned(); + let (counterparty_node_id, forward_chan_id) = match chan_info_opt { + Some((cp_id, chan_id)) => (cp_id, chan_id), None => { forwarding_channel_not_found!(); continue; @@ -4564,8 +4787,10 @@ where if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() { - log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + if new_feerate != chan.context.get_feerate_sat_per_1000_weight() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + } return NotifyOption::SkipPersistNoEvents; } if !chan.context.is_live() { @@ -4589,8 +4814,8 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut should_persist = NotifyOption::SkipPersistNoEvents; - let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); + let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); + let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { @@ -4600,9 +4825,9 @@ where |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None } ) { let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - min_mempool_feerate + anchor_feerate } else { - normal_feerate + non_anchor_feerate }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -4624,6 +4849,10 @@ where /// with the current [`ChannelConfig`]. /// * Removing peers which have disconnected but and no longer have any channels. /// * Force-closing and removing channels which have not completed establishment in a timely manner. + /// * Forgetting about stale outbound payments, either those that have already been fulfilled + /// or those awaiting an invoice that hasn't been delivered in the necessary amount of time. + /// The latter is determined using the system clock in `std` and the highest seen block time + /// minus two hours in `no-std`. 
/// /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate /// estimate fetches. @@ -4634,8 +4863,8 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut should_persist = NotifyOption::SkipPersistNoEvents; - let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); + let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); + let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee); let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); @@ -4682,9 +4911,9 @@ where match phase { ChannelPhase::Funded(chan) => { let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - min_mempool_feerate + anchor_feerate } else { - normal_feerate + non_anchor_feerate }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -4849,10 +5078,21 @@ where } for shutdown_res in shutdown_channels { - self.finish_force_close_channel(shutdown_res); + self.finish_close_channel(shutdown_res); } - self.pending_outbound_payments.remove_stale_payments(&self.pending_events); + #[cfg(feature = "std")] + let duration_since_epoch = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH"); + #[cfg(not(feature = "std"))] + let duration_since_epoch = Duration::from_secs( + self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64 + ); + + self.pending_outbound_payments.remove_stale_payments( + duration_since_epoch, &self.pending_events + ); // Technically we don't need to do this here, but if we have holding cell entries in a // channel that need freeing, it's better to do that here and block a background task @@ -5192,8 +5432,11 @@ where for htlc in sources.drain(..) { if let Err((pk, err)) = self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, - |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })) - { + |_, definitely_duplicate| { + debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); + Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }) + } + ) { if let msgs::ErrorAction::IgnoreError = err.err.action { // We got a temporary failure updating monitor, but will claim the // HTLC when the monitor updating is restored (or on chain). @@ -5221,7 +5464,7 @@ where } } - fn claim_funds_from_hop) -> Option>(&self, + fn claim_funds_from_hop, bool) -> Option>(&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! @@ -5231,6 +5474,11 @@ where // `BackgroundEvent`s. let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire); + // As we may call handle_monitor_update_completion_actions in rather rare cases, check that + // the required mutexes are not held before we start. 
+ debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread); + { let per_peer_state = self.per_peer_state.read().unwrap(); let chan_id = prev_hop.outpoint.to_channel_id(); @@ -5252,25 +5500,70 @@ where let counterparty_node_id = chan.context.get_counterparty_node_id(); let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger); - if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res { - if let Some(action) = completion_action(Some(htlc_value_msat)) { - log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}", - chan_id, action); - peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); + match fulfill_res { + UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => { + if let Some(action) = completion_action(Some(htlc_value_msat), false) { + log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}", + chan_id, action); + peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); + } + if !during_init { + handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, + peer_state, per_peer_state, chan); + } else { + // If we're running during init we cannot update a monitor directly - + // they probably haven't actually been loaded yet. Instead, push the + // monitor update as a background event. + self.pending_background_events.lock().unwrap().push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo: prev_hop.outpoint, + update: monitor_update.clone(), + }); + } } - if !during_init { - handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, - peer_state, per_peer_state, chan); - } else { - // If we're running during init we cannot update a monitor directly - - // they probably haven't actually been loaded yet. Instead, push the - // monitor update as a background event. - self.pending_background_events.lock().unwrap().push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo: prev_hop.outpoint, - update: monitor_update.clone(), - }); + UpdateFulfillCommitFetch::DuplicateClaim {} => { + let action = if let Some(action) = completion_action(None, true) { + action + } else { + return Ok(()); + }; + mem::drop(peer_state_lock); + + log_trace!(self.logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", + chan_id, action); + let (node_id, funding_outpoint, blocker) = + if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: node_id, + downstream_funding_outpoint: funding_outpoint, + blocking_action: blocker, + } = action { + (node_id, funding_outpoint, blocker) + } else { + debug_assert!(false, + "Duplicate claims should always free another channel immediately"); + return Ok(()); + }; + if let Some(peer_state_mtx) = per_peer_state.get(&node_id) { + let mut peer_state = peer_state_mtx.lock().unwrap(); + if let Some(blockers) = peer_state + .actions_blocking_raa_monitor_updates + .get_mut(&funding_outpoint.to_channel_id()) + { + let mut found_blocker = false; + blockers.retain(|iter| { + // Note that we could actually be blocked, in + // which case we need to only remove the one + // blocker which was added duplicatively. 
+ let first_blocker = !found_blocker; + if *iter == blocker { found_blocker = true; } + *iter != blocker || !first_blocker + }); + debug_assert!(found_blocker); + } + } else { + debug_assert!(false); + } } } } @@ -5318,7 +5611,7 @@ where // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are // generally always allowed to be duplicative (and it's specifically noted in // `PaymentForwarded`). - self.handle_monitor_update_completion_actions(completion_action(None)); + self.handle_monitor_update_completion_actions(completion_action(None, false)); Ok(()) } @@ -5327,7 +5620,7 @@ where } fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, - forwarded_htlc_value_msat: Option, from_onchain: bool, + forwarded_htlc_value_msat: Option, from_onchain: bool, startup_replay: bool, next_channel_counterparty_node_id: Option, next_channel_outpoint: OutPoint ) { match source { @@ -5348,13 +5641,84 @@ where HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); + #[cfg(debug_assertions)] + let claiming_chan_funding_outpoint = hop_data.outpoint; let res = self.claim_funds_from_hop(hop_data, payment_preimage, - |htlc_claim_value_msat| { - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { None }; + |htlc_claim_value_msat, definitely_duplicate| { + let chan_to_release = + if let Some(node_id) = next_channel_counterparty_node_id { + Some((node_id, next_channel_outpoint, completed_blocker)) + } else { + // We can only get `None` here if we are processing a + // `ChannelMonitor`-originated event, in which case we + // don't care about ensuring we wake the downstream + // channel's monitor updating - the channel is already + // closed. + None + }; + if definitely_duplicate && startup_replay { + // On startup we may get redundant claims which are related to + // monitor updates still in flight. In that case, we shouldn't + // immediately free, but instead let that monitor update complete + // in the background. + #[cfg(debug_assertions)] { + let background_events = self.pending_background_events.lock().unwrap(); + // There should be a `BackgroundEvent` pending... + assert!(background_events.iter().any(|ev| { + match ev { + // to apply a monitor update that blocked the claiming channel, + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + funding_txo, update, .. + } => { + if *funding_txo == claiming_chan_funding_outpoint { + assert!(update.updates.iter().any(|upd| + if let ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: update_preimage + } = upd { + payment_preimage == *update_preimage + } else { false } + ), "{:?}", update); + true + } else { false } + }, + // or the channel we'd unblock is already closed, + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup( + (funding_txo, monitor_update) + ) => { + if *funding_txo == next_channel_outpoint { + assert_eq!(monitor_update.updates.len(), 1); + assert!(matches!( + monitor_update.updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); + true + } else { false } + }, + // or the monitor update has completed and will unblock + // immediately once we get going. + BackgroundEvent::MonitorUpdatesComplete { + channel_id, .. 
+ } => + *channel_id == claiming_chan_funding_outpoint.to_channel_id(), + } + }), "{:?}", *background_events); + } + None + } else if definitely_duplicate { + if let Some(other_chan) = chan_to_release { + Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: other_chan.0, + downstream_funding_outpoint: other_chan.1, + blocking_action: other_chan.2, + }) + } else { None } + } else { + let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { None } + } else { None }; Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { event: events::Event::PaymentForwarded { fee_earned_msat, @@ -5363,19 +5727,9 @@ where next_channel_id: Some(next_channel_outpoint.to_channel_id()), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, - downstream_counterparty_and_funding_outpoint: - if let Some(node_id) = next_channel_counterparty_node_id { - Some((node_id, next_channel_outpoint, completed_blocker)) - } else { - // We can only get `None` here if we are processing a - // `ChannelMonitor`-originated event, in which case we - // don't care about ensuring we wake the downstream - // channel's monitor updating - the channel is already - // closed. - None - }, + downstream_counterparty_and_funding_outpoint: chan_to_release, }) - } else { None } + } }); if let Err((pk, err)) = res { let result: Result<(), _> = Err(err); @@ -5391,6 +5745,10 @@ where } fn handle_monitor_update_completion_actions>(&self, actions: I) { + debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => { @@ -5420,6 +5778,15 @@ where self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker)); } }, + MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action, + } => { + self.handle_monitor_update_release( + downstream_counterparty_node_id, + downstream_funding_outpoint, + Some(blocking_action), + ); + }, } } } @@ -5711,7 +6078,7 @@ where fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are // likely to be lost on restart! 
- if msg.chain_hash != self.genesis_hash { + if msg.chain_hash != self.chain_hash { return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } @@ -5976,7 +6343,7 @@ where hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer, - self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry); + self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry); if let Some(announcement_sigs) = announcement_sigs_opt { log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id()); peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { @@ -6075,13 +6442,15 @@ where self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } if let Some(shutdown_res) = finish_shutdown { - self.finish_force_close_channel(shutdown_res); + self.finish_close_channel(shutdown_res); } Ok(()) } fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> { + let mut shutdown_result = None; + let unbroadcasted_batch_funding_txid; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -6094,6 +6463,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid(); let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry); if let Some(msg) = closing_signed { peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { @@ -6130,6 +6500,11 @@ where }); } self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); + shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid)); + } + mem::drop(per_peer_state); + if let Some(shutdown_result) = shutdown_result { + self.finish_close_channel(shutdown_result); } Ok(()) } @@ -6214,6 +6589,9 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry); if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { + log_trace!(self.logger, + "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", + msg.channel_id); peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id) .or_insert_with(Vec::new) .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop)); @@ -6234,7 +6612,7 @@ where hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo); Ok(()) } @@ -6345,7 +6723,7 @@ where }, hash_map::Entry::Vacant(entry) => { if !is_our_scid && forward_info.incoming_amt_msat.is_some() && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash) + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash) { let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner()); let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); @@ -6538,7 +6916,7 @@ where peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_phase_entry!(self, chan.announcement_signatures( - &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), + &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(), msg, &self.default_configuration ), chan_phase_entry), // Note that announcement_signatures fails if the channel cannot be announced, @@ -6588,8 +6966,13 @@ where if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersistNoEvents); } else { - log_debug!(self.logger, "Received channel_update for channel {}.", chan_id); - try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry); + log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id); + let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry); + // If nothing changed after applying their update, we don't need to bother + // persisting. + if !did_change { + return Ok(NotifyOption::SkipPersistNoEvents); + } } } else { return try_chan_phase_entry!(self, Err(ChannelError::Close( @@ -6609,7 +6992,10 @@ where let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) + MsgHandleErrInternal::send_err_msg_no_close( + format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), + msg.channel_id + ) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6621,7 +7007,7 @@ where // freed HTLCs to fail backwards. If in the future we no longer drop pending // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. let responses = try_chan_phase_entry!(self, chan.channel_reestablish( - msg, &self.logger, &self.node_signer, self.genesis_hash, + msg, &self.logger, &self.node_signer, self.chain_hash, &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { @@ -6653,7 +7039,39 @@ where "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => { + log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure", + log_bytes!(msg.channel_id.0)); + // Unfortunately, lnd doesn't force close on errors + // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119). + // One of the few ways to get an lnd counterparty to force close is by + // replicating what they do when restoring static channel backups (SCBs). They + // send an invalid `ChannelReestablish` with `0` commitment numbers and an + // invalid `your_last_per_commitment_secret`. + // + // Since we received a `ChannelReestablish` for a channel that doesn't exist, we + // can assume it's likely the channel closed from our point of view, but it + // remains open on the counterparty's side. By sending this bogus + // `ChannelReestablish` message now as a response to theirs, we trigger them to + // force close broadcasting their latest state. If the closing transaction from + // our point of view remains unconfirmed, it'll enter a race with the + // counterparty's to-be-broadcast latest commitment transaction. + peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish { + node_id: *counterparty_node_id, + msg: msgs::ChannelReestablish { + channel_id: msg.channel_id, + next_local_commitment_number: 0, + next_remote_commitment_number: 0, + your_last_per_commitment_secret: [1u8; 32], + my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(), + next_funding_txid: None, + }, + }); + return Err(MsgHandleErrInternal::send_err_msg_no_close( + format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", + counterparty_node_id), msg.channel_id) + ) + } } }; @@ -6682,7 +7100,7 @@ where MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage); - self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; @@ -6717,8 +7135,8 @@ where self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() } + action: msgs::ErrorAction::DisconnectPeer { + msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }) }, }); } @@ -6734,7 +7152,7 @@ where } for failure in failed_channels.drain(..) 
{ - self.finish_force_close_channel(failure); + self.finish_close_channel(failure); } has_pending_monitor_events @@ -6804,6 +7222,7 @@ where fn maybe_generate_initial_closing_signed(&self) -> bool { let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); let mut has_update = false; + let mut shutdown_results = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -6814,6 +7233,7 @@ where peer_state.channel_by_id.retain(|channel_id, phase| { match phase { ChannelPhase::Funded(chan) => { + let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid(); match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { Ok((msg_opt, tx_opt)) => { if let Some(msg) = msg_opt { @@ -6836,6 +7256,7 @@ where log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); update_maps_on_chan_removal!(self, &chan.context); + shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid)); false } else { true } }, @@ -6857,6 +7278,10 @@ where let _ = handle_error!(self, err, counterparty_node_id); } + for shutdown_result in shutdown_results.drain(..) { + self.finish_close_channel(shutdown_result); + } + has_update } @@ -6882,7 +7307,289 @@ where counterparty_node_id, funding_txo, update }); } - self.finish_force_close_channel(failure); + self.finish_close_channel(failure); + } + } + + /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the + /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will + /// not have an expiration unless otherwise set on the builder. + /// + /// # Privacy + /// + /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the + /// introduction node and a derived signing pubkey for recipient privacy. As such, currently, + /// the node must be announced. Otherwise, there is no way to find a path to the introduction + /// node in order to send the [`InvoiceRequest`]. + /// + /// # Limitations + /// + /// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s + /// reply path. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// + /// [`Offer`]: crate::offers::offer::Offer + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + pub fn create_offer_builder( + &self, description: String + ) -> OfferBuilder { + let node_id = self.get_our_node_id(); + let expanded_key = &self.inbound_payment_key; + let entropy = &*self.entropy_source; + let secp_ctx = &self.secp_ctx; + let path = self.create_one_hop_blinded_path(); + + OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx) + .chain_hash(self.chain_hash) + .path(path) + } + + /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the + /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund. + /// + /// # Payment + /// + /// The provided `payment_id` is used to ensure that only one invoice is paid for the refund. + /// See [Avoiding Duplicate Payments] for other requirements once the payment has been sent. + /// + /// The builder will have the provided expiration set. Any changes to the expiration on the + /// returned builder will not be honored by [`ChannelManager`]. 
For `no-std`, the highest seen + /// block time minus two hours is used for the current time when determining if the refund has + /// expired. + /// + /// To revoke the refund, use [`ChannelManager::abandon_payment`] prior to receiving the + /// invoice. If abandoned, or an invoice isn't received before expiration, the payment will fail + /// with an [`Event::InvoiceRequestFailed`]. + /// + /// If `max_total_routing_fee_msat` is not specified, The default from + /// [`RouteParameters::from_payment_params_and_value`] is applied. + /// + /// # Privacy + /// + /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as + /// the introduction node and a derived payer id for payer privacy. As such, currently, the + /// node must be announced. Otherwise, there is no way to find a path to the introduction node + /// in order to send the [`Bolt12Invoice`]. + /// + /// # Limitations + /// + /// Requires a direct connection to an introduction node in the responding + /// [`Bolt12Invoice::payment_paths`]. + /// + /// # Errors + /// + /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link + /// or if `amount_msats` is invalid. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// + /// [`Refund`]: crate::offers::refund::Refund + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths + pub fn create_refund_builder( + &self, description: String, amount_msats: u64, absolute_expiry: Duration, + payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option + ) -> Result, Bolt12SemanticError> { + let node_id = self.get_our_node_id(); + let expanded_key = &self.inbound_payment_key; + let entropy = &*self.entropy_source; + let secp_ctx = &self.secp_ctx; + let path = self.create_one_hop_blinded_path(); + + let builder = RefundBuilder::deriving_payer_id( + description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id + )? + .chain_hash(self.chain_hash) + .absolute_expiry(absolute_expiry) + .path(path); + + let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry); + self.pending_outbound_payments + .add_new_awaiting_invoice( + payment_id, expiration, retry_strategy, max_total_routing_fee_msat, + ) + .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?; + + Ok(builder) + } + + /// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and + /// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual + /// [`Bolt12Invoice`] once it is received. + /// + /// Uses [`InvoiceRequestBuilder`] such that the [`InvoiceRequest`] it builds is recognized by + /// the [`ChannelManager`] when handling a [`Bolt12Invoice`] message in response to the request. + /// The optional parameters are used in the builder, if `Some`: + /// - `quantity` for [`InvoiceRequest::quantity`] which must be set if + /// [`Offer::expects_quantity`] is `true`. + /// - `amount_msats` if overpaying what is required for the given `quantity` is desired, and + /// - `payer_note` for [`InvoiceRequest::payer_note`]. + /// + /// If `max_total_routing_fee_msat` is not specified, The default from + /// [`RouteParameters::from_payment_params_and_value`] is applied. + /// + /// # Payment + /// + /// The provided `payment_id` is used to ensure that only one invoice is paid for the request + /// when received. 
See [Avoiding Duplicate Payments] for other requirements once the payment has + /// been sent. + /// + /// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the + /// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the + /// payment will fail with an [`Event::InvoiceRequestFailed`]. + /// + /// # Privacy + /// + /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`] + /// as the introduction node and a derived payer id for payer privacy. As such, currently, the + /// node must be announced. Otherwise, there is no way to find a path to the introduction node + /// in order to send the [`Bolt12Invoice`]. + /// + /// # Limitations + /// + /// Requires a direct connection to an introduction node in [`Offer::paths`] or to + /// [`Offer::signing_pubkey`], if empty. A similar restriction applies to the responding + /// [`Bolt12Invoice::payment_paths`]. + /// + /// # Errors + /// + /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link + /// or if the provided parameters are invalid for the offer. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + /// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity + /// [`InvoiceRequest::payer_note`]: crate::offers::invoice_request::InvoiceRequest::payer_note + /// [`InvoiceRequestBuilder`]: crate::offers::invoice_request::InvoiceRequestBuilder + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths + /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments + pub fn pay_for_offer( + &self, offer: &Offer, quantity: Option, amount_msats: Option, + payer_note: Option, payment_id: PaymentId, retry_strategy: Retry, + max_total_routing_fee_msat: Option + ) -> Result<(), Bolt12SemanticError> { + let expanded_key = &self.inbound_payment_key; + let entropy = &*self.entropy_source; + let secp_ctx = &self.secp_ctx; + + let builder = offer + .request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)? + .chain_hash(self.chain_hash)?; + let builder = match quantity { + None => builder, + Some(quantity) => builder.quantity(quantity)?, + }; + let builder = match amount_msats { + None => builder, + Some(amount_msats) => builder.amount_msats(amount_msats)?, + }; + let builder = match payer_note { + None => builder, + Some(payer_note) => builder.payer_note(payer_note), + }; + + let invoice_request = builder.build_and_sign()?; + let reply_path = self.create_one_hop_blinded_path(); + + let expiration = StaleExpiration::TimerTicks(1); + self.pending_outbound_payments + .add_new_awaiting_invoice( + payment_id, expiration, retry_strategy, max_total_routing_fee_msat + ) + .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?; + + let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); + if offer.paths().is_empty() { + let message = new_pending_onion_message( + OffersMessage::InvoiceRequest(invoice_request), + Destination::Node(offer.signing_pubkey()), + Some(reply_path), + ); + pending_offers_messages.push(message); + } else { + // Send as many invoice requests as there are paths in the offer (with an upper bound). + // Using only one path could result in a failure if the path no longer exists. But only + // one invoice for a given payment id will be paid, even if more than one is received. 
+ const REQUEST_LIMIT: usize = 10; + for path in offer.paths().into_iter().take(REQUEST_LIMIT) { + let message = new_pending_onion_message( + OffersMessage::InvoiceRequest(invoice_request.clone()), + Destination::BlindedPath(path.clone()), + Some(reply_path.clone()), + ); + pending_offers_messages.push(message); + } + } + + Ok(()) + } + + /// Creates a [`Bolt12Invoice`] for a [`Refund`] and enqueues it to be sent via an onion + /// message. + /// + /// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a + /// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding + /// [`PaymentPreimage`]. + /// + /// # Limitations + /// + /// Requires a direct connection to an introduction node in [`Refund::paths`] or to + /// [`Refund::payer_id`], if empty. This request is best effort; an invoice will be sent to each + /// node meeting the aforementioned criteria, but there's no guarantee that they will be + /// received and no retries will be made. + /// + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> { + let expanded_key = &self.inbound_payment_key; + let entropy = &*self.entropy_source; + let secp_ctx = &self.secp_ctx; + + let amount_msats = refund.amount_msats(); + let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; + + match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) { + Ok((payment_hash, payment_secret)) => { + let payment_paths = vec![ + self.create_one_hop_blinded_payment_path(payment_secret), + ]; + #[cfg(not(feature = "no-std"))] + let builder = refund.respond_using_derived_keys( + payment_paths, payment_hash, expanded_key, entropy + )?; + #[cfg(feature = "no-std")] + let created_at = Duration::from_secs( + self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + ); + #[cfg(feature = "no-std")] + let builder = refund.respond_using_derived_keys_no_std( + payment_paths, payment_hash, created_at, expanded_key, entropy + )?; + let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?; + let reply_path = self.create_one_hop_blinded_path(); + + let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); + if refund.paths().is_empty() { + let message = new_pending_onion_message( + OffersMessage::Invoice(invoice), + Destination::Node(refund.payer_id()), + Some(reply_path), + ); + pending_offers_messages.push(message); + } else { + for path in refund.paths() { + let message = new_pending_onion_message( + OffersMessage::Invoice(invoice.clone()), + Destination::BlindedPath(path.clone()), + Some(reply_path.clone()), + ); + pending_offers_messages.push(message); + } + } + + Ok(()) + }, + Err(()) => Err(Bolt12SemanticError::InvalidAmount), } } @@ -6986,6 +7693,37 @@ where inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key) } + /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction + /// node. + fn create_one_hop_blinded_path(&self) -> BlindedPath { + let entropy_source = self.entropy_source.deref(); + let secp_ctx = &self.secp_ctx; + BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap() + } + + /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction + /// node. 
+ fn create_one_hop_blinded_payment_path( + &self, payment_secret: PaymentSecret + ) -> (BlindedPayInfo, BlindedPath) { + let entropy_source = self.entropy_source.deref(); + let secp_ctx = &self.secp_ctx; + + let payee_node_id = self.get_our_node_id(); + let max_cltv_expiry = self.best_block.read().unwrap().height() + LATENCY_GRACE_PERIOD_BLOCKS; + let payee_tlvs = ReceiveTlvs { + payment_secret, + payment_constraints: PaymentConstraints { + max_cltv_expiry, + htlc_minimum_msat: 1, + }, + }; + // TODO: Err for overflow? + BlindedPath::one_hop_for_payment( + payee_node_id, payee_tlvs, entropy_source, secp_ctx + ).unwrap() + } + /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids /// are used when constructing the phantom invoice's route hints. /// @@ -6994,7 +7732,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { - let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); + let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source); // Ensure the generated scid doesn't conflict with a real channel. match short_to_chan_info.get(&scid_candidate) { Some(_) => continue, @@ -7024,7 +7762,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { - let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source); + let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source); // Ensure the generated scid doesn't conflict with a real channel. 
if short_to_chan_info.contains_key(&scid_candidate) { continue } return scid_candidate @@ -7287,7 +8025,7 @@ where *best_block = BestBlock::new(header.prev_blockhash, new_height) } - self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)); + self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)); } } @@ -7313,13 +8051,13 @@ where let _persistence_guard = PersistenceNotifierGuard::optionally_notify_skipping_background_events( self, || -> NotifyOption { NotifyOption::DoPersist }); - self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger) + self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger) .map(|(a, b)| (a, Vec::new(), b))); let last_best_block_height = self.best_block.read().unwrap().height(); if height < last_best_block_height { let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire); - self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)); + self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)); } } @@ -7336,7 +8074,7 @@ where self, || -> NotifyOption { NotifyOption::DoPersist }); *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)); + self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)); macro_rules! 
max_time { ($timestamp: expr) => { @@ -7456,7 +8194,7 @@ where msg: announcement_sigs, }); if let Some(height) = height_opt { - if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) { + if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.chain_hash, height, &self.default_configuration) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: announcement, // Note that announcement_signatures fails if the channel cannot be announced, @@ -7495,10 +8233,12 @@ where self.issue_channel_close_events(&channel.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { - channel_id: channel.context.channel_id(), - data: reason_message, - } }, + action: msgs::ErrorAction::DisconnectPeer { + msg: Some(msgs::ErrorMessage { + channel_id: channel.context.channel_id(), + data: reason_message, + }) + }, }); return false; } @@ -7589,35 +8329,41 @@ where self.best_block.read().unwrap().clone() } - /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by + /// Fetches the set of [`NodeFeatures`] flags that are provided by or required by /// [`ChannelManager`]. pub fn node_features(&self) -> NodeFeatures { provided_node_features(&self.default_configuration) } - /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by + /// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] - pub fn invoice_features(&self) -> Bolt11InvoiceFeatures { - provided_invoice_features(&self.default_configuration) + pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures { + provided_bolt11_invoice_features(&self.default_configuration) + } + + /// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by + /// [`ChannelManager`]. + fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures { + provided_bolt12_invoice_features(&self.default_configuration) } - /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by + /// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by /// [`ChannelManager`]. pub fn channel_features(&self) -> ChannelFeatures { provided_channel_features(&self.default_configuration) } - /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by + /// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by /// [`ChannelManager`]. pub fn channel_type_features(&self) -> ChannelTypeFeatures { provided_channel_type_features(&self.default_configuration) } - /// Fetches the set of [`InitFeatures`] flags which are provided by or required by + /// Fetches the set of [`InitFeatures`] flags that are provided by or required by /// [`ChannelManager`]. 
pub fn init_features(&self) -> InitFeatures { provided_init_features(&self.default_configuration) @@ -7822,7 +8568,6 @@ where fn peer_disconnected(&self, counterparty_node_id: &PublicKey) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify( self, || NotifyOption::SkipPersistHandleEvents); - let mut failed_channels = Vec::new(); let mut per_peer_state = self.per_peer_state.write().unwrap(); let remove_peer = { @@ -7835,24 +8580,24 @@ where peer_state.channel_by_id.retain(|_, phase| { let context = match phase { ChannelPhase::Funded(chan) => { - chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); - // We only retain funded channels that are not shutdown. - if !chan.is_shutdown() { + if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() { + // We only retain funded channels that are not shutdown. return true; } - &chan.context + &mut chan.context }, // Unfunded channels will always be removed. ChannelPhase::UnfundedOutboundV1(chan) => { - &chan.context + &mut chan.context }, ChannelPhase::UnfundedInboundV1(chan) => { - &chan.context + &mut chan.context }, }; // Clean up for removal. update_maps_on_chan_removal!(self, &context); self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer); + failed_channels.push(context.force_shutdown(false)); false }); // Note that we don't bother generating any events for pre-accept channels - @@ -7911,7 +8656,7 @@ where mem::drop(per_peer_state); for failure in failed_channels.drain(..) { - self.finish_force_close_channel(failure); + self.finish_close_channel(failure); } } @@ -8067,7 +8812,7 @@ where let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) { - if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) { + if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: *counterparty_node_id, msg, @@ -8090,8 +8835,8 @@ where provided_init_features(&self.default_configuration) } - fn get_genesis_hashes(&self) -> Option> { - Some(vec![ChainHash::from(&self.genesis_hash[..])]) + fn get_chain_hashes(&self) -> Option> { + Some(vec![self.chain_hash]) } fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) { @@ -8149,7 +8894,128 @@ where } } -/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by +impl +OffersMessageHandler for ChannelManager +where + M::Target: chain::Watch<::Signer>, + T::Target: BroadcasterInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + R::Target: Router, + L::Target: Logger, +{ + fn handle_message(&self, message: OffersMessage) -> Option { + let secp_ctx = &self.secp_ctx; + let expanded_key = &self.inbound_payment_key; + + match message { + OffersMessage::InvoiceRequest(invoice_request) => { + let amount_msats = match InvoiceBuilder::::amount_msats( + &invoice_request + ) { + Ok(amount_msats) => Some(amount_msats), + Err(error) => return Some(OffersMessage::InvoiceError(error.into())), + }; + let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) { + Ok(invoice_request) => invoice_request, + Err(()) => { + let error = Bolt12SemanticError::InvalidMetadata; + return 
Some(OffersMessage::InvoiceError(error.into())); + }, + }; + let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; + + match self.create_inbound_payment(amount_msats, relative_expiry, None) { + Ok((payment_hash, payment_secret)) if invoice_request.keys.is_some() => { + let payment_paths = vec![ + self.create_one_hop_blinded_payment_path(payment_secret), + ]; + #[cfg(not(feature = "no-std"))] + let builder = invoice_request.respond_using_derived_keys( + payment_paths, payment_hash + ); + #[cfg(feature = "no-std")] + let created_at = Duration::from_secs( + self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + ); + #[cfg(feature = "no-std")] + let builder = invoice_request.respond_using_derived_keys_no_std( + payment_paths, payment_hash, created_at + ); + match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) { + Ok(invoice) => Some(OffersMessage::Invoice(invoice)), + Err(error) => Some(OffersMessage::InvoiceError(error.into())), + } + }, + Ok((payment_hash, payment_secret)) => { + let payment_paths = vec![ + self.create_one_hop_blinded_payment_path(payment_secret), + ]; + #[cfg(not(feature = "no-std"))] + let builder = invoice_request.respond_with(payment_paths, payment_hash); + #[cfg(feature = "no-std")] + let created_at = Duration::from_secs( + self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + ); + #[cfg(feature = "no-std")] + let builder = invoice_request.respond_with_no_std( + payment_paths, payment_hash, created_at + ); + let response = builder.and_then(|builder| builder.allow_mpp().build()) + .map_err(|e| OffersMessage::InvoiceError(e.into())) + .and_then(|invoice| + match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) { + Ok(invoice) => Ok(OffersMessage::Invoice(invoice)), + Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError( + InvoiceError::from_string("Failed signing invoice".to_string()) + )), + Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError( + InvoiceError::from_string("Failed invoice signature verification".to_string()) + )), + }); + match response { + Ok(invoice) => Some(invoice), + Err(error) => Some(error), + } + }, + Err(()) => { + Some(OffersMessage::InvoiceError(Bolt12SemanticError::InvalidAmount.into())) + }, + } + }, + OffersMessage::Invoice(invoice) => { + match invoice.verify(expanded_key, secp_ctx) { + Err(()) => { + Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned()))) + }, + Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => { + Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into())) + }, + Ok(payment_id) => { + if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) { + log_trace!(self.logger, "Failed paying invoice: {:?}", e); + Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e)))) + } else { + None + } + }, + } + }, + OffersMessage::InvoiceError(invoice_error) => { + log_trace!(self.logger, "Received invoice_error: {}", invoice_error); + None + }, + } + } + + fn release_pending_messages(&self) -> Vec> { + core::mem::take(&mut self.pending_offers_messages.lock().unwrap()) + } +} + +/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by /// [`ChannelManager`]. 
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { let mut node_features = provided_init_features(config).to_context(); @@ -8157,29 +9023,35 @@ pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { node_features } -/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by +/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] -pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures { +pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures { + provided_init_features(config).to_context() +} + +/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by +/// [`ChannelManager`]. +pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures { provided_init_features(config).to_context() } -/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by +/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by /// [`ChannelManager`]. pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures { provided_init_features(config).to_context() } -/// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by +/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by /// [`ChannelManager`]. pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures { ChannelTypeFeatures::from_init(&provided_init_features(config)) } -/// Fetches the set of [`InitFeatures`] flags which are provided by or required by +/// Fetches the set of [`InitFeatures`] flags that are provided by or required by /// [`ChannelManager`]. pub fn provided_init_features(config: &UserConfig) -> InitFeatures { // Note that if new features are added here which other peers may (eventually) require, we @@ -8240,7 +9112,7 @@ impl Writeable for ChannelDetails { (10, self.channel_value_satoshis, required), (12, self.unspendable_punishment_reserve, option), (14, user_channel_id_low, required), - (16, self.next_outbound_htlc_limit_msat, required), // Forwards compatibility for removed balance_msat field. + (16, self.balance_msat, required), (18, self.outbound_capacity_msat, required), (19, self.next_outbound_htlc_limit_msat, required), (20, self.inbound_capacity_msat, required), @@ -8276,7 +9148,7 @@ impl Readable for ChannelDetails { (10, channel_value_satoshis, required), (12, unspendable_punishment_reserve, option), (14, user_channel_id_low, required), - (16, _balance_msat, option), // Backwards compatibility for removed balance_msat field. + (16, balance_msat, required), (18, outbound_capacity_msat, required), // Note that by the time we get past the required read above, outbound_capacity_msat will be // filled in, so we can safely unwrap it here. 
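// Illustrative aside, a sketch with a hypothetical value rather than part of this patch: the
// 128-bit `user_channel_id` in the `ChannelDetails` TLV stream above is serialized as two u64
// halves for backwards compatibility (the low half at TLV type 14, the high half as a separate
// optional field) and is reassembled losslessly on read, as the next hunk does:
//
//     let user_channel_id: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
//     let user_channel_id_low = user_channel_id as u64;
//     let user_channel_id_high = (user_channel_id >> 64) as u64;
//     let rebuilt = (user_channel_id_low as u128) | ((user_channel_id_high as u128) << 64);
//     assert_eq!(rebuilt, user_channel_id);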
@@ -8302,8 +9174,6 @@ impl Readable for ChannelDetails { let user_channel_id = user_channel_id_low as u128 + ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64); - let _balance_msat: Option = _balance_msat; - Ok(Self { inbound_scid_alias, channel_id: channel_id.0.unwrap(), @@ -8316,6 +9186,7 @@ impl Readable for ChannelDetails { channel_value_satoshis: channel_value_satoshis.0.unwrap(), unspendable_punishment_reserve, user_channel_id, + balance_msat: balance_msat.0.unwrap(), outbound_capacity_msat: outbound_capacity_msat.0.unwrap(), next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(), next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(), @@ -8637,7 +9508,7 @@ where write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); - self.genesis_hash.write(writer)?; + self.chain_hash.write(writer)?; { let best_block = self.best_block.read().unwrap(); best_block.height().write(writer)?; @@ -8656,7 +9527,7 @@ where } number_of_funded_channels += peer_state.channel_by_id.iter().filter( - |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false } + |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false } ).count(); } @@ -8667,7 +9538,7 @@ where let peer_state = &mut *peer_state_lock; for channel in peer_state.channel_by_id.iter().filter_map( |(_, phase)| if let ChannelPhase::Funded(channel) = phase { - if channel.context.is_funding_initiated() { Some(channel) } else { None } + if channel.context.is_funding_broadcast() { Some(channel) } else { None } } else { None } ) { channel.write(writer)?; @@ -9048,7 +9919,7 @@ where fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - let genesis_hash: BlockHash = Readable::read(reader)?; + let chain_hash: ChainHash = Readable::read(reader)?; let best_block_height: u32 = Readable::read(reader)?; let best_block_hash: BlockHash = Readable::read(reader)?; @@ -9091,7 +9962,10 @@ where log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true); + let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true); + if batch_funding_txid.is_some() { + return Err(DecodeError::InvalidValue); + } if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update @@ -9131,7 +10005,7 @@ where if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } - if channel.context.is_funding_initiated() { + if channel.context.is_funding_broadcast() { id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id()); } match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) { @@ -9693,7 +10567,7 @@ where let mut outbound_scid_alias; loop { outbound_scid_alias = fake_scid::Namespace::OutboundAlias - 
.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source); + .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source); if outbound_scid_aliases.insert(outbound_scid_alias) { break; } } chan.context.set_outbound_scid_alias(outbound_scid_alias); @@ -9786,6 +10660,9 @@ where Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), .. } = action { if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) { + log_trace!(args.logger, + "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", + blocked_channel_outpoint.to_channel_id()); blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates .entry(blocked_channel_outpoint.to_channel_id()) .or_insert_with(Vec::new).push(blocking_action.clone()); @@ -9797,6 +10674,9 @@ where // anymore. } } + if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action { + debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue"); + } } } peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions; @@ -9807,7 +10687,7 @@ where } let channel_manager = ChannelManager { - genesis_hash, + chain_hash, fee_estimator: bounded_fee_estimator, chain_monitor: args.chain_monitor, tx_broadcaster: args.tx_broadcaster, @@ -9845,6 +10725,10 @@ where event_persist_notifier: Notifier::new(), needs_persist_flag: AtomicBool::new(false), + funding_batch_states: Mutex::new(BTreeMap::new()), + + pending_offers_messages: Mutex::new(Vec::new()), + entropy_source: args.entropy_source, node_signer: args.node_signer, signer_provider: args.signer_provider, @@ -9865,7 +10749,7 @@ where // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), - downstream_closed, downstream_node_id, downstream_funding); + downstream_closed, true, downstream_node_id, downstream_funding); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -10564,6 +11448,16 @@ mod tests { check_api_error_message(expected_message, res_err) } + fn check_channel_unavailable_error(res_err: Result, expected_channel_id: ChannelId, peer_node_id: PublicKey) { + let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id); + check_api_error_message(expected_message, res_err) + } + + fn check_api_misuse_error(res_err: Result) { + let expected_message = "No such channel awaiting to be accepted.".to_string(); + check_api_error_message(expected_message, res_err) + } + fn check_api_error_message(expected_err_message: String, res_err: Result) { match res_err { Err(APIError::APIMisuseError { err }) => { @@ -10608,6 +11502,36 @@ mod tests { check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key); } + #[test] + fn test_api_calls_with_unavailable_channel() { + // Tests that our API functions that expects a `counterparty_node_id` and a `channel_id` + // as input, behaves as expected if the `counterparty_node_id` is a known peer in the + // `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with + // the given `channel_id`. 
+ let chanmon_cfg = create_chanmon_cfgs(2); + let node_cfg = create_node_cfgs(2, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); + let nodes = create_network(2, &node_cfg, &node_chanmgr); + + let counterparty_node_id = nodes[1].node.get_our_node_id(); + + // Dummy values + let channel_id = ChannelId::from_bytes([4; 32]); + + // Test the API functions. + check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42)); + + check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id); + } + #[test] fn test_connection_limiting() { // Test that we limit un-channel'd peers and un-funded channels properly. @@ -10868,6 +11792,30 @@ mod tests { sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok()); } + #[test] + fn test_final_incorrect_cltv(){ + let chanmon_cfg = create_chanmon_cfgs(1); + let node_cfg = create_node_cfgs(1, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]); + let node = create_network(1, &node_cfg, &node_chanmgr); + + let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive { + amt_msat: 100, + outgoing_cltv_value: 22, + payment_metadata: None, + keysend_preimage: None, + payment_data: Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([0; 32]), total_msat: 100, + }), + custom_tlvs: Vec::new(), + }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None); + + // Should not return an error as this condition: + // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334 + // is not satisfied. + assert!(result.is_ok()); + } + #[test] fn test_inbound_anchors_manual_acceptance() { // Tests that we properly limit inbound channels when we have the manual-channel-acceptance @@ -11033,6 +11981,67 @@ mod tests { let payment_preimage = PaymentPreimage([42; 32]); assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"); } + + #[test] + fn test_trigger_lnd_force_close() { + let chanmon_cfg = create_chanmon_cfgs(2); + let node_cfg = create_node_cfgs(2, &chanmon_cfg); + let user_config = test_default_channel_config(); + let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]); + let nodes = create_network(2, &node_cfg, &node_chanmgr); + + // Open a channel, immediately disconnect each other, and broadcast Alice's latest state. 
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap(); + check_closed_broadcast(&nodes[0], 1, true); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + { + let txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + check_spends!(txn[0], funding_tx); + } + + // Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them + // such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from + // their side. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let channel_reestablish = get_event_msg!( + nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id() + ); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &channel_reestablish); + + // Alice should respond with an error since the channel isn't known, but a bogus + // `ChannelReestablish` should be sent first, such that we actually trigger Bob to force + // close even if it was an lnd node. + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.next_local_commitment_number, 0); + assert_eq!(msg.next_remote_commitment_number, 0); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &msg); + } else { panic!() }; + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + let expected_close_reason = ClosureReason::ProcessingError { + err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string() + }; + check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000); + { + let txn = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + check_spends!(txn[0], funding_tx); + } + } } #[cfg(ldk_bench)] @@ -11197,7 +12206,7 @@ pub mod bench { macro_rules! send_payment { ($node_a: expr, $node_b: expr) => { let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features($node_b.invoice_features()).unwrap(); + .with_bolt11_features($node_b.bolt11_invoice_features()).unwrap(); let mut payment_preimage = PaymentPreimage([0; 32]); payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes()); payment_count += 1;
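// Usage sketch for the BOLT 12 APIs added in this patch; not part of the diff itself. It
// assumes a `channel_manager` of the `ChannelManager` type above and that the standard
// `OfferBuilder::amount_msats`/`build` methods are in scope; only `create_offer_builder` and
// `pay_for_offer` are taken from this patch.
//
//     // Recipient side: publish an offer recognized by this ChannelManager.
//     let offer = channel_manager
//         .create_offer_builder("coffee".to_string())
//         .amount_msats(10_000)
//         .build()
//         .expect("valid offer");
//
//     // Payer side: a fresh PaymentId ensures at most one invoice is paid for this request.
//     let payment_id = PaymentId([42; 32]);
//     channel_manager
//         .pay_for_offer(
//             &offer,
//             None,                // quantity
//             None,                // amount_msats (pay the offer's amount)
//             None,                // payer_note
//             payment_id,
//             Retry::Attempts(3),
//             None,                // max_total_routing_fee_msat (router default)
//         )
//         .expect("duplicate payment_id or invalid offer parameters");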