// construct one themselves.
use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
use crate::ln::wire::Encode;
-use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
+use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
use crate::offers::invoice_error::InvoiceError;
+use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
use crate::offers::merkle::SignError;
-use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
+use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
use crate::util::errors::APIError;
#[cfg(not(c_bindings))]
use {
+ crate::offers::offer::DerivedMetadata,
crate::routing::router::DefaultRouter,
crate::routing::gossip::NetworkGraph,
crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
crate::sign::KeysManager,
};
+#[cfg(c_bindings)]
+use {
+ crate::offers::offer::OfferWithDerivedMetadataBuilder,
+ crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
+};
use alloc::collections::{btree_map, BTreeMap};
/// For HTLCs received by LDK, these will ultimately bubble back up as
/// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
}
match self {
Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
_ => None,
}
}
if require_disconnected && self.is_connected {
return false
}
- self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+ !self.channel_by_id.iter().any(|(_, phase)|
+ match phase {
+ ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
+ ChannelPhase::UnfundedInboundV1(_) => false,
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(_) => true,
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(_) => false,
+ }
+ )
&& self.monitor_update_blocked_actions.is_empty()
&& self.in_flight_monitor_updates.is_empty()
}
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
+ Arc<KeysManager>,
Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
&'e DefaultRouter<
&'f NetworkGraph<&'g L>,
&'g L,
+ &'c KeysManager,
&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
}
-/// Manager which keeps track of a number of channels and sends messages to the appropriate
-/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+/// A lightning node's channel state machine and payment management logic, which facilitates
+/// sending, forwarding, and receiving payments through lightning channels.
+///
+/// [`ChannelManager`] is parameterized by a number of components to achieve this.
+/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
+/// channel
+/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
+/// closing channels
+/// - [`EntropySource`] for providing random data needed for cryptographic operations
+/// - [`NodeSigner`] for cryptographic operations scoped to the node
+/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
+/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
+/// timely manner
+/// - [`Router`] for finding payment paths when initiating and retrying payments
+/// - [`Logger`] for logging operational information of varying degrees
+///
+/// Additionally, it implements the following traits:
+/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
+/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
+/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
+/// - [`EventsProvider`] to generate user-actionable [`Event`]s
+/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
+///
+/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
+/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
+///
+/// # `ChannelManager` vs `ChannelMonitor`
+///
+/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
+/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
+/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
+/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
+/// [`chain::Watch`] of them.
+///
+/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
+/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
+/// for any pertinent on-chain activity, enforcing claims as needed.
+///
+/// This division of off-chain management and on-chain enforcement allows for interesting node
+/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
+/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
///
-/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
-/// to individual Channels.
+/// # Initialization
+///
+/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
+/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
+/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
+/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
+/// detailed in the [`ChannelManagerReadArgs`] documentation.
+///
+/// ```
+/// use bitcoin::BlockHash;
+/// use bitcoin::network::constants::Network;
+/// use lightning::chain::BestBlock;
+/// # use lightning::chain::channelmonitor::ChannelMonitor;
+/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
+/// # use lightning::routing::gossip::NetworkGraph;
+/// use lightning::util::config::UserConfig;
+/// use lightning::util::ser::ReadableArgs;
+///
+/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
+/// # fn example<
+/// # 'a,
+/// # L: lightning::util::logger::Logger,
+/// # ES: lightning::sign::EntropySource,
+/// # S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
+/// # SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
+/// # SP: Sized,
+/// # R: lightning::io::Read,
+/// # >(
+/// # fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
+/// # chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
+/// # tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
+/// # router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
+/// # logger: &L,
+/// # entropy_source: &ES,
+/// # node_signer: &dyn lightning::sign::NodeSigner,
+/// # signer_provider: &lightning::sign::DynSignerProvider,
+/// # best_block: lightning::chain::BestBlock,
+/// # current_timestamp: u32,
+/// # mut reader: R,
+/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
+/// // Fresh start with no channels
+/// let params = ChainParameters {
+/// network: Network::Bitcoin,
+/// best_block,
+/// };
+/// let default_config = UserConfig::default();
+/// let channel_manager = ChannelManager::new(
+/// fee_estimator, chain_monitor, tx_broadcaster, router, logger, entropy_source, node_signer,
+/// signer_provider, default_config, params, current_timestamp
+/// );
+///
+/// // Restart from deserialized data
+/// let mut channel_monitors = read_channel_monitors();
+/// let args = ChannelManagerReadArgs::new(
+/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
+/// router, logger, default_config, channel_monitors.iter_mut().collect()
+/// );
+/// let (block_hash, channel_manager) =
+/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
+///
+/// // Update the ChannelManager and ChannelMonitors with the latest chain data
+/// // ...
+///
+/// // Move the monitors to the ChannelManager's chain::Watch parameter
+/// for monitor in channel_monitors {
+/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+/// }
+/// # Ok(())
+/// # }
+/// ```
+///
+/// # Operation
+///
+/// The following is required for [`ChannelManager`] to function properly:
+/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
+/// called by [`PeerManager::read_event`] when processing network I/O)
+/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
+/// (typically initiated when [`PeerManager::process_events`] is called)
+/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
+/// as documented by those traits
+/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
+/// every minute
+/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
+/// [`Persister`] such as a [`KVStore`] implementation
+/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
+///
+/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
+/// when the last two requirements need to be checked.
+///
+/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
+/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
+/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
+/// crate. For languages other than Rust, the availability of similar utilities may vary.
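+///
+/// As an illustration only, the following sketch drives these requirements from a single loop.
+/// The `persist` step stands in for writing the serialized [`ChannelManager`] via a
+/// [`Persister`], and the sleep/timing between iterations is elided.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn persist<CM: AChannelManager>(_channel_manager: &CM) {}
+/// # fn example<CM: AChannelManager>(channel_manager: &CM) {
+/// loop {
+///     // Prune stale channel and payment state; should run roughly once per minute.
+///     channel_manager.get_cm().timer_tick_occurred();
+///
+///     // Persist if anything material has changed since the last iteration.
+///     if channel_manager.get_cm().get_and_clear_needs_persistence() {
+///         persist(channel_manager);
+///     }
+///
+///     // Surface and handle any user-actionable events.
+///     channel_manager.get_cm().process_pending_events(&|_event: Event| {
+///         // Handle the event here.
+///     });
+/// # break;
+/// }
+/// # }
+/// ```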
+///
+/// # Persistence
///
/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
///
+/// # `ChannelUpdate` Messages
+///
/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
///
+/// # DoS Mitigation
+///
/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
/// inbound channels without confirmed funding transactions. This may result in nodes which we do
/// not have a channel with being unable to connect to us or open new channels with us if we have
/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
/// never limited. Please ensure you limit the count of such channels yourself.
///
+/// # Type Aliases
+///
/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
+/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
+/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
+/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
+/// [`Persister`]: crate::util::persist::Persister
+/// [`KVStore`]: crate::util::persist::KVStore
+/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
+/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
+/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
+/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
/// [`funding_created`]: msgs::FundingCreated
/// [`funding_transaction_generated`]: Self::funding_transaction_generated
/// [`BlockHash`]: bitcoin::hash_types::BlockHash
/// [`update_channel`]: chain::Watch::update_channel
/// [`ChannelUpdate`]: msgs::ChannelUpdate
-/// [`timer_tick_occurred`]: Self::timer_tick_occurred
/// [`read`]: ReadableArgs::read
//
// Lock order:
///
/// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
pub config: Option<ChannelConfig>,
+ /// Pending inbound HTLCs.
+ ///
+ /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+ pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
+ /// Pending outbound HTLCs.
+ ///
+ /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
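+	///
+	/// For example, the value currently being routed out of the channel can be totaled as
+	/// follows (a minimal sketch):
+	///
+	/// ```
+	/// # use lightning::ln::channelmanager::ChannelDetails;
+	/// # fn example(channel: &ChannelDetails) {
+	/// let total_outbound_msat: u64 =
+	///     channel.pending_outbound_htlcs.iter().map(|htlc| htlc.amount_msat).sum();
+	/// # }
+	/// ```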
+ pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
}
impl ChannelDetails {
inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
config: Some(context.config()),
channel_shutdown_state: Some(context.shutdown_state()),
+ pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
+ pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
}
}
}
ChannelPhase::UnfundedInboundV1(channel) => {
convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
}
};
}
counterparty_node_id: $channel.context.get_counterparty_node_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ channel_type: Some($channel.context.get_channel_type().clone()),
}, None));
$channel.context.set_channel_pending_event_emitted();
}
let logger = WithChannelContext::from(&$self.logger, &$chan.context);
let mut updates = $chan.monitor_updating_restored(&&logger,
&$self.node_signer, $self.chain_hash, &$self.default_configuration,
- $self.best_block.read().unwrap().height());
+ $self.best_block.read().unwrap().height);
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
// We only send a channel_update in the case where we are just now sending a
best_block: RwLock::new(params.best_block),
- outbound_scid_aliases: Mutex::new(HashSet::new()),
- pending_inbound_payments: Mutex::new(HashMap::new()),
+ outbound_scid_aliases: Mutex::new(new_hash_set()),
+ pending_inbound_payments: Mutex::new(new_hash_map()),
pending_outbound_payments: OutboundPayments::new(),
- forward_htlcs: Mutex::new(HashMap::new()),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
- pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- outpoint_to_peer: Mutex::new(HashMap::new()),
- short_to_chan_info: FairRwLock::new(HashMap::new()),
+ forward_htlcs: Mutex::new(new_hash_map()),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
+ pending_intercepted_htlcs: Mutex::new(new_hash_map()),
+ outpoint_to_peer: Mutex::new(new_hash_map()),
+ short_to_chan_info: FairRwLock::new(new_hash_map()),
our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
secp_ctx,
highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
- per_peer_state: FairRwLock::new(HashMap::new()),
+ per_peer_state: FairRwLock::new(new_hash_map()),
pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
}
fn create_and_insert_outbound_scid_alias(&self) -> u64 {
- let height = self.best_block.read().unwrap().height();
+ let height = self.best_block.read().unwrap().height;
let mut outbound_scid_alias = 0;
let mut i = 0;
loop {
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config,
- self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
+ self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id)
{
Ok(res) => res,
Err(e) => {
// the same channel.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
// the same channel.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
/// Gets the list of channels we have with a given counterparty, in random order.
pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
+ // Unfunded channel has no update
+ (None, chan_phase.context().get_counterparty_node_id())
+ },
}
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
log_error!(logger, "Force-closing channel {}", &channel_id);
/// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
///
- /// You can always get the latest local transaction(s) to broadcast from
- /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ /// You can always broadcast the latest local transaction(s) via
+ /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
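+	///
+	/// A minimal sketch, assuming the channel and counterparty identifiers are already known:
+	///
+	/// ```
+	/// # use bitcoin::secp256k1::PublicKey;
+	/// # use lightning::ln::ChannelId;
+	/// # use lightning::ln::channelmanager::AChannelManager;
+	/// #
+	/// # fn example<T: AChannelManager>(
+	/// #     channel_manager: T, channel_id: &ChannelId, counterparty_node_id: &PublicKey
+	/// # ) {
+	/// # let channel_manager = channel_manager.get_cm();
+	/// if let Err(e) = channel_manager.force_close_without_broadcasting_txn(
+	///     channel_id, counterparty_node_id
+	/// ) {
+	///     println!("Failed force-closing channel: {:?}", e);
+	/// }
+	/// # }
+	/// ```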
pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
-> Result<(), APIError> {
self.force_close_sending_error(channel_id, counterparty_node_id, false)
None
};
- let cur_height = self.best_block.read().unwrap().height() + 1;
+ let cur_height = self.best_block.read().unwrap().height + 1;
if let Err((err_msg, code)) = check_incoming_htlc_cltv(
cur_height, outgoing_cltv_value, msg.cltv_expiry
match decoded_hop {
onion_utils::Hop::Receive(next_hop_data) => {
// OUR PAYMENT!
- let current_height: u32 = self.best_block.read().unwrap().height();
+ let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
current_height, self.default_configuration.accept_mpp_keysend)
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
/// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
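+	///
+	/// A minimal sketch; the `route_params`, `payment_hash`, and `recipient_onion` values are
+	/// assumed to have been derived from an invoice or other payment instructions:
+	///
+	/// ```
+	/// # use lightning::ln::PaymentHash;
+	/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecipientOnionFields, Retry};
+	/// # use lightning::routing::router::RouteParameters;
+	/// #
+	/// # fn example<T: AChannelManager>(
+	/// #     channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+	/// #     route_params: RouteParameters, retry: Retry
+	/// # ) {
+	/// # let channel_manager = channel_manager.get_cm();
+	/// // A unique id correlates any retries and duplicate sends to a single payment.
+	/// let payment_id = PaymentId([42; 32]);
+	/// match channel_manager.send_payment(
+	///     payment_hash, recipient_onion, payment_id, route_params, retry
+	/// ) {
+	///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
+	///     Err(e) => println!("Failed sending payment: {:?}", e),
+	/// }
+	/// # }
+	/// ```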
pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
#[cfg(test)]
pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
#[cfg(test)]
pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
}
}
pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
///
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
///
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
/// us to easily discern them from real payments.
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
&self.entropy_source, &self.node_signer, best_block_height,
ProbeSendFailure::RouteNotFound
})?;
- let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
+ let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
let mut res = Vec::new();
}));
}
{
- let height = self.best_block.read().unwrap().height();
+ let height = self.best_block.read().unwrap().height;
// Transactions are evaluated as final by network mempools if their locktime is strictly
// lower than the next block height. However, the modules constituting our Lightning
// node might not have perfect sync about their blockchain views. Thus, if the wallet
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
- let current_height: u32 = self.best_block.read().unwrap().height();
+ let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(hop_data,
incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, Some(phantom_shared_secret), false, None,
(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
Some(payment_data), phantom_shared_secret, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_data, payment_preimage, payment_metadata,
+ incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
+ } => {
let onion_fields = RecipientOnionFields {
payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
payment_metadata,
debug_assert!(!committed_to_claimable);
let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(
- &self.best_block.read().unwrap().height().to_be_bytes(),
+ &self.best_block.read().unwrap().height.to_be_bytes(),
);
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
}
};
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
- let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
+ let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
if (cltv_expiry as u64) < expected_min_expiry_height {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
&payment_hash, cltv_expiry, expected_min_expiry_height);
}
}
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
- if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- }
return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
pending_msg_events, counterparty_node_id)
},
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
}
});
FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
},
FailureCode::InvalidOnionPayload(data) => {
if !valid_mpp {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
let receiver = HTLCDestination::FailedPayment { payment_hash };
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
- next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_channel_id = hop_data.channel_id;
+ let prev_user_channel_id = hop_data.user_channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- #[cfg(debug_assertions)]
- let claiming_channel_id = hop_data.channel_id;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_channel_id,
+ *channel_id == prev_channel_id,
}
}), "{:?}", *background_events);
}
"skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- total_fee_earned_msat,
- claim_from_onchain_tx: from_onchain,
prev_channel_id: Some(prev_channel_id),
next_channel_id: Some(next_channel_id),
- outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ prev_user_channel_id,
+ next_user_channel_id,
+ total_fee_earned_msat,
skimmed_fee_msat,
+ claim_from_onchain_tx: from_onchain,
+ outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
})
// TODO: Once we can rely on the counterparty_node_id from the
// monitor event, this and the outpoint_to_peer map should be removed.
let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- match outpoint_to_peer.get(&funding_txo) {
+ match outpoint_to_peer.get(funding_txo) {
Some(cp_id) => cp_id.clone(),
None => return,
}
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
// happening and return an error. N.B. that we create channel with an outbound SCID of zero so
// that we can delay allocating the SCID until after we're sure that the checks below will
// succeed.
- let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
+ let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
Some(unaccepted_channel) => {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
- &self.logger, accept_0conf).map_err(|e| {
- let err_str = e.to_string();
- log_error!(logger, "{}", err_str);
-
- APIError::ChannelUnavailable { err: err_str }
- })
- }
+ &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
+ },
_ => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
- Err(APIError::APIMisuseError { err: err_str })
+ return Err(APIError::APIMisuseError { err: err_str });
}
- }?;
+ };
- if accept_0conf {
- // This should have been correctly configured by the call to InboundV1Channel::new.
- debug_assert!(channel.context.minimum_depth().unwrap() == 0);
- } else if channel.context.get_channel_type().requires_zero_conf() {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ match res {
+ Err(err) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
+ Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+ Err(e) => {
+ return Err(APIError::ChannelUnavailable { err: e.err });
+ },
}
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
- log_error!(logger, "{}", err_str);
+ }
+ Ok(mut channel) => {
+ if accept_0conf {
+ // This should have been correctly configured by the call to InboundV1Channel::new.
+ debug_assert!(channel.context.minimum_depth().unwrap() == 0);
+ } else if channel.context.get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- } else {
- // If this peer already has some channels, a new channel won't increase our number of peers
- // with unfunded channels, so as long as we aren't over the maximum number of unfunded
- // channels per-peer we can accept channels from a peer with existing ones.
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
- }
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
- log_error!(logger, "{}", err_str);
+ return Err(APIError::APIMisuseError { err: err_str });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- }
- }
+ return Err(APIError::APIMisuseError { err: err_str });
+ }
+ }
- // Now that we know we have a channel, assign an outbound SCID alias.
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- channel.context.set_outbound_scid_alias(outbound_scid_alias);
+ // Now that we know we have a channel, assign an outbound SCID alias.
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ channel.context.set_outbound_scid_alias(outbound_scid_alias);
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.context.get_counterparty_node_id(),
- msg: channel.accept_inbound_channel(),
- });
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg: channel.accept_inbound_channel(),
+ });
- peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+ peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
- Ok(())
+ Ok(())
+ },
+ }
}
/// Gets the number of peers which match the given filter and do not have any funded, outbound,
fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
where Filter: Fn(&PeerState<SP>) -> bool {
let mut peers_without_funded_channels = 0;
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
{
let peer_state_lock = self.per_peer_state.read().unwrap();
for (_, peer_mtx) in peer_state_lock.iter() {
num_unfunded_channels += 1;
}
},
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ // Only inbound V2 channels that are not 0conf and that we do not contribute to will be
+ // included in the unfunded count.
+ if chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.dual_funding_context.our_funding_satoshis == 0 {
+ num_unfunded_channels += 1;
+ }
+ },
ChannelPhase::UnfundedOutboundV1(_) => {
// Outbound channels don't contribute to the unfunded count in the DoS context.
continue;
+ },
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(_) => {
+ // Outbound channels don't contribute to the unfunded count in the DoS context.
+ continue;
}
}
}
msg.common_fields.temporary_channel_id.clone()));
}
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
if self.default_configuration.manually_accept_inbound_channels {
let channel_type = channel::channel_type_from_open_channel(
- &msg, &peer_state.latest_features, &self.channel_type_features()
+ &msg.common_fields, &peer_state.latest_features, &self.channel_type_features()
).map_err(|e|
MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
)?;
let mut chan = remove_channel_phase!(self, chan_phase_entry);
finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
},
+ // TODO(dual_funding): Combine this match arm with above.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
+				let context = phase.context_mut();
+				let logger = WithChannelContext::from(&self.logger, context);
+				log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+ let mut chan = remove_channel_phase!(self, chan_phase_entry);
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
+ },
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
+ let next_user_channel_id;
let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
// outbound HTLC is claimed. This is guaranteed to all complete before we
// process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+ next_user_channel_id = chan.context.get_user_id();
res
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
};
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
- funding_txo, msg.channel_id
+ funding_txo, msg.channel_id, Some(next_user_channel_id),
);
Ok(())
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_phase_entry!(self, chan.announcement_signatures(
- &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
+ &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
msg, &self.default_configuration
), chan_phase_entry),
// Note that announcement_signatures fails if the channel cannot be announced,
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage,
htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
- false, counterparty_node_id, funding_outpoint, channel_id);
+ false, counterparty_node_id, funding_outpoint, channel_id, None);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+ reason
+ } else {
+ ClosureReason::HolderForceClosed
+ };
+ failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
},
});
}
self.finish_close_channel(failure);
}
}
+}
+macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
/// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
/// not have an expiration unless otherwise set on the builder.
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
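+///
+/// A minimal sketch of building and encoding an offer for display, e.g., in a QR code; the
+/// description and amount are illustrative:
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let offer = channel_manager
+///     .create_offer_builder("coffee".to_string())?
+///     .amount_msats(10_000_000)
+///     .build()?;
+/// let encoded_offer = offer.to_string();
+/// # Ok(())
+/// # }
+/// ```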
pub fn create_offer_builder(
- &self, description: String
- ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
- let node_id = self.get_our_node_id();
- let expanded_key = &self.inbound_payment_key;
- let entropy = &*self.entropy_source;
- let secp_ctx = &self.secp_ctx;
-
- let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ &$self, description: String
+ ) -> Result<$builder, Bolt12SemanticError> {
+ let node_id = $self.get_our_node_id();
+ let expanded_key = &$self.inbound_payment_key;
+ let entropy = &*$self.entropy_source;
+ let secp_ctx = &$self.secp_ctx;
+
+ let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = OfferBuilder::deriving_signing_pubkey(
description, node_id, expanded_key, entropy, secp_ctx
)
- .chain_hash(self.chain_hash)
+ .chain_hash($self.chain_hash)
.path(path);
- Ok(builder)
+ Ok(builder.into())
}
+} }
+macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
/// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
///
/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
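+///
+/// A minimal sketch; the description, amount, expiry, and retry parameters are illustrative,
+/// and building the refund also registers the payment as awaiting an invoice:
+///
+/// ```
+/// # use core::time::Duration;
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, Retry};
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
+/// #     max_total_fee_msat: Option<u64>
+/// # ) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// let refund = channel_manager
+///     .create_refund_builder(
+///         "coffee".to_string(), amount_msats, absolute_expiry, payment_id, retry,
+///         max_total_fee_msat
+///     )?
+///     .build()?;
+/// let encoded_refund = refund.to_string();
+/// # Ok(())
+/// # }
+/// ```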
pub fn create_refund_builder(
- &self, description: String, amount_msats: u64, absolute_expiry: Duration,
+ &$self, description: String, amount_msats: u64, absolute_expiry: Duration,
payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
- ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
- let node_id = self.get_our_node_id();
- let expanded_key = &self.inbound_payment_key;
- let entropy = &*self.entropy_source;
- let secp_ctx = &self.secp_ctx;
+ ) -> Result<$builder, Bolt12SemanticError> {
+ let node_id = $self.get_our_node_id();
+ let expanded_key = &$self.inbound_payment_key;
+ let entropy = &*$self.entropy_source;
+ let secp_ctx = &$self.secp_ctx;
- let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
- .chain_hash(self.chain_hash)
+ .chain_hash($self.chain_hash)
.absolute_expiry(absolute_expiry)
.path(path);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
+
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
- self.pending_outbound_payments
+ $self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
- Ok(builder)
+ Ok(builder.into())
}
+} }
+
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
+ #[cfg(not(c_bindings))]
+ create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
+ #[cfg(not(c_bindings))]
+ create_refund_builder!(self, RefundBuilder<secp256k1::All>);
+
+ #[cfg(c_bindings)]
+ create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
+ #[cfg(c_bindings)]
+ create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
/// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
/// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
/// Errors if:
/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
/// - the provided parameters are invalid for the offer,
+ /// - the offer is for an unsupported chain, or
/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
/// request.
///
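+	/// A minimal usage sketch, assuming an `offer` obtained out of band, e.g., parsed from a
+	/// bech32 string via [`Offer`]'s `FromStr` implementation:
+	///
+	/// ```
+	/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, Retry};
+	/// # use lightning::offers::offer::Offer;
+	/// # use lightning::offers::parse::Bolt12SemanticError;
+	/// #
+	/// # fn example<T: AChannelManager>(
+	/// #     channel_manager: T, offer: &Offer, retry: Retry
+	/// # ) -> Result<(), Bolt12SemanticError> {
+	/// # let channel_manager = channel_manager.get_cm();
+	/// let payment_id = PaymentId([42; 32]);
+	/// channel_manager.pay_for_offer(
+	///     offer, None, None, None, payment_id, retry, None
+	/// )
+	/// # }
+	/// ```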
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let builder = offer
+ let builder: InvoiceRequestBuilder<DerivedPayerId, secp256k1::All> = offer
.request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
- .chain_hash(self.chain_hash)?;
+ .into();
+ let builder = builder.chain_hash(self.chain_hash)?;
+
let builder = match quantity {
None => builder,
Some(quantity) => builder.quantity(quantity)?,
let invoice_request = builder.build_and_sign()?;
let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments
.add_new_awaiting_invoice(
///
/// # Errors
///
- /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
- /// path for the invoice.
+ /// Errors if:
+ /// - the refund is for an unsupported chain, or
+ /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
+ /// the invoice.
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
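+	///
+	/// A minimal sketch, assuming a `refund` received out of band from the prospective payer:
+	///
+	/// ```
+	/// # use lightning::ln::channelmanager::AChannelManager;
+	/// # use lightning::offers::parse::Bolt12SemanticError;
+	/// # use lightning::offers::refund::Refund;
+	/// #
+	/// # fn example<T: AChannelManager>(
+	/// #     channel_manager: T, refund: &Refund
+	/// # ) -> Result<(), Bolt12SemanticError> {
+	/// # let channel_manager = channel_manager.get_cm();
+	/// // Enqueues a Bolt12Invoice in reply, which the payer then pays.
+	/// channel_manager.request_refund_payment(refund)
+	/// # }
+	/// ```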
pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
let amount_msats = refund.amount_msats();
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+ if refund.chain() != self.chain_hash {
+ return Err(Bolt12SemanticError::UnsupportedChain);
+ }
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
+ let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
let reply_path = self.create_blinded_path()
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
/// Errors if the `MessageRouter` errors or returns an empty `Vec`.
fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
let recipient = self.get_our_node_id();
- let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
let peers = self.per_peer_state.read().unwrap()
.collect::<Vec<_>>();
self.router
- .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ .create_blinded_paths(recipient, peers, secp_ctx)
.and_then(|paths| paths.into_iter().next().ok_or(()))
}
fn create_blinded_payment_paths(
&self, amount_msats: u64, payment_secret: PaymentSecret
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
- let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
let first_hops = self.list_usable_channels();
let payee_node_id = self.get_our_node_id();
- let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+ let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
+ LATENCY_GRACE_PERIOD_BLOCKS;
let payee_tlvs = ReceiveTlvs {
payment_secret,
},
};
self.router.create_blinded_payment_paths(
- payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
+ payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
)
}
///
/// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
/// Note that this method is not guaranteed to return unique values; you may need to call it a few
/// times to get a unique scid.
pub fn get_intercept_scid(&self) -> u64 {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
{
let best_block = self.best_block.read().unwrap();
- assert_eq!(best_block.block_hash(), header.prev_blockhash,
+ assert_eq!(best_block.block_hash, header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
- assert_eq!(best_block.height(), height - 1,
+ assert_eq!(best_block.height, height - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
}
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
- assert_eq!(best_block.block_hash(), header.block_hash(),
+ assert_eq!(best_block.block_hash, header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
- assert_eq!(best_block.height(), height,
+ assert_eq!(best_block.height, height,
"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
.map(|(a, b)| (a, Vec::new(), b)));
- let last_best_block_height = self.best_block.read().unwrap().height();
+ let last_best_block_height = self.best_block.read().unwrap().height;
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
match phase {
// Retain unfunded channels.
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
+ // TODO(dual_funding): Combine this match arm with the one above.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
ChannelPhase::Funded(channel) => {
let res = f(channel);
if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
}
/// Returns true if this [`ChannelManager`] needs to be persisted.
+ ///
+ /// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
+ /// indicates this should be checked.
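+ ///
+ /// A minimal wait-then-persist loop might look like the following (an illustrative
+ /// sketch; `persist` is a hypothetical helper, and in practice the
+ /// `lightning-background-processor` crate drives this):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     channel_manager.get_event_or_persistence_needed_future().await;
+ ///     if channel_manager.get_and_clear_needs_persistence() {
+ ///         persist(&channel_manager);
+ ///     }
+ ///     // ... also process any pending events here ...
+ /// }
+ /// ```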
pub fn get_and_clear_needs_persistence(&self) -> bool {
self.needs_persist_flag.swap(false, Ordering::AcqRel)
}
}
&mut chan.context
},
- // Unfunded channels will always be removed.
- ChannelPhase::UnfundedOutboundV1(chan) => {
- &mut chan.context
+ // We retain UnfundedOutboundV1 channels for some time in case the peer
+ // unexpectedly disconnects and intends to reconnect.
+ ChannelPhase::UnfundedOutboundV1(_) => {
+ return true;
},
+ // Unfunded inbound channels will always be removed.
ChannelPhase::UnfundedInboundV1(chan) => {
&mut chan.context
},
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ &mut chan.context
+ },
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ &mut chan.context
+ },
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
return NotifyOption::SkipPersistNoEvents;
}
e.insert(Mutex::new(PeerState {
- channel_by_id: HashMap::new(),
- inbound_channel_request_by_id: HashMap::new(),
+ channel_by_id: new_hash_map(),
+ inbound_channel_request_by_id: new_hash_map(),
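+ // (`new_hash_map` and the related `hash_map_*`/`hash_set_*` helpers used throughout
+ // below abstract over the concrete map implementation the crate is built with.)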
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
let mut peer_state = e.get().lock().unwrap();
peer_state.latest_features = init_msg.features.clone();
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
if inbound_peer_limited &&
Self::unfunded_channel_count(&*peer_state, best_block_height) ==
peer_state.channel_by_id.len()
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
- if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
- ).for_each(|chan| {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&&logger),
- });
- });
+ for (_, phase) in peer_state.channel_by_id.iter_mut() {
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&&logger),
+ });
+ }
+
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_open_channel(self.chain_hash),
+ });
+ }
+
+ // TODO(dual_funding): Combine this match arm with the one above once #[cfg(dual_funding)] is removed.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_open_channel_v2(self.chain_hash),
+ });
+ },
+
+ ChannelPhase::UnfundedInboundV1(_) => {
+ // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
+ // they are not persisted and won't be recovered after a crash.
+ // Therefore, they shouldn't exist at this point.
+ debug_assert!(false);
+ }
+
+ // TODO(dual_funding): Combine this match arm with the one above once #[cfg(dual_funding)] is removed.
+ #[cfg(dual_funding)]
+ ChannelPhase::UnfundedInboundV2(_) => {
+ // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
+ // they are not persisted and won't be recovered after a crash.
+ // Therefore, they shouldn't exist at this point.
+ debug_assert!(false);
+ },
+ }
+ }
}
return NotifyOption::SkipPersistHandleEvents;
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-
match &msg.data as &str {
"cannot co-op close channel w/ active htlcs"|
"link failed to shutdown" =>
// We're not going to bother handling this in a sensible way, instead simply
// repeating the Shutdown message on repeat until morale improves.
if !msg.channel_id.is_zero() {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if peer_state_mutex_opt.is_none() { return; }
- let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
- if let Some(msg) = chan.get_outbound_shutdown() {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
- peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: *counterparty_node_id,
- action: msgs::ErrorAction::SendWarningMessage {
- msg: msgs::WarningMessage {
- channel_id: msg.channel_id,
- data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
- },
- log_level: Level::Trace,
+ PersistenceNotifierGuard::optionally_notify(
+ self,
+ || -> NotifyOption {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
+ let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+ if let Some(msg) = chan.get_outbound_shutdown() {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: *counterparty_node_id,
+ action: msgs::ErrorAction::SendWarningMessage {
+ msg: msgs::WarningMessage {
+ channel_id: msg.channel_id,
+ data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+ },
+ log_level: Level::Trace,
+ }
+ });
+ // This can happen in a fairly tight loop, so we absolutely cannot trigger
+ // a `ChannelManager` write here.
+ return NotifyOption::SkipPersistHandleEvents;
}
- });
- }
+ NotifyOption::SkipPersistNoEvents
+ }
+ );
}
return;
}
_ => {}
}
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
if msg.channel_id.is_zero() {
let channel_ids: Vec<ChannelId> = {
let per_peer_state = self.per_peer_state.read().unwrap();
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
- node_id: *counterparty_node_id,
- msg,
- });
- return;
- }
+ match peer_state.channel_by_id.get_mut(&msg.channel_id) {
+ Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
+ }
+ },
+ #[cfg(dual_funding)]
+ Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
+ }
+ },
+ None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
+ #[cfg(dual_funding)]
+ Some(ChannelPhase::UnfundedInboundV2(_)) => (),
}
}
let builder = invoice_request.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at
);
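+ // With `c_bindings`, `respond_using_derived_keys_no_std` returns a bindings-specific
+ // builder type; `.into()` converts it into the common `InvoiceBuilder` so the code
+ // below is identical either way.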
+ let builder: Result<InvoiceBuilder<DerivedSigningPubkey>, _> =
+ builder.map(|b| b.into());
match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
Err(error) => Some(OffersMessage::InvoiceError(error.into())),
let builder = invoice_request.respond_with_no_std(
payment_paths, payment_hash, created_at
);
+ let builder: Result<InvoiceBuilder<ExplicitSigningPubkey>, _> =
+ builder.map(|b| b.into());
let response = builder.and_then(|builder| builder.allow_mpp().build())
.map_err(|e| OffersMessage::InvoiceError(e.into()))
- .and_then(|invoice|
- match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
+ .and_then(|invoice| {
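+ // (The mutable rebinding below appears needed because the `c_bindings` variant of
+ // `sign` takes the unsigned invoice mutably.)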
+ #[cfg(c_bindings)]
+ let mut invoice = invoice;
+ match invoice.sign(|invoice: &UnsignedBolt12Invoice|
+ self.node_signer.sign_bolt12_invoice(invoice)
+ ) {
Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
- Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
+ Err(SignError::Signing) => Err(OffersMessage::InvoiceError(
InvoiceError::from_string("Failed signing invoice".to_string())
)),
Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
InvoiceError::from_string("Failed invoice signature verification".to_string())
)),
- });
+ }
+ });
match response {
Ok(invoice) => Some(invoice),
Err(error) => Some(error),
(37, user_channel_id_high_opt, option),
(39, self.feerate_sat_per_1000_weight, option),
(41, self.channel_shutdown_state, option),
+ (43, self.pending_inbound_htlcs, optional_vec),
+ (45, self.pending_outbound_htlcs, optional_vec),
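+ // (43/45 are odd TLV types: readers which predate these fields will skip them
+ // rather than failing to deserialize.)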
});
Ok(())
}
(37, user_channel_id_high_opt, option),
(39, feerate_sat_per_1000_weight, option),
(41, channel_shutdown_state, option),
+ (43, pending_inbound_htlcs, optional_vec),
+ (45, pending_outbound_htlcs, optional_vec),
});
// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
inbound_htlc_maximum_msat,
feerate_sat_per_1000_weight,
channel_shutdown_state,
+ pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
+ pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
})
}
}
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
+ (1, requires_blinded_error, (default_value, false)),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option), // Added in 0.0.116
self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
- best_block.height().write(writer)?;
- best_block.block_hash().write(writer)?;
+ best_block.height.write(writer)?;
+ best_block.block_hash.write(writer)?;
}
let mut serializable_peer_count: u64 = 0;
}
// Encode without retry info for 0.0.101 compatibility.
- let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+ let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
for (id, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
if !updates.is_empty() {
- if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+ if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
}
}
mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
Self {
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
- channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
+ channel_monitors: hash_map_from_iter(
+ channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
+ ),
}
}
}
let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
- let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
- let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
+ let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
},
hash_map::Entry::Vacant(entry) => {
- let mut by_id_map = HashMap::new();
+ let mut by_id_map = new_hash_map();
by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
entry.insert(by_id_map);
}
const MAX_ALLOC_SIZE: usize = 1024 * 64;
let forward_htlcs_count: u64 = Readable::read(reader)?;
- let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+ let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
for _ in 0..forward_htlcs_count {
let short_channel_id = Readable::read(reader)?;
let pending_forwards_count: u64 = Readable::read(reader)?;
let peer_state_from_chans = |channel_by_id| {
PeerState {
channel_by_id,
- inbound_channel_request_by_id: HashMap::new(),
+ inbound_channel_request_by_id: new_hash_map(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
};
let peer_count: u64 = Readable::read(reader)?;
- let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
+ let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
for _ in 0..peer_count {
let peer_pubkey = Readable::read(reader)?;
- let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+ let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
let mut peer_state = peer_state_from_chans(peer_chans);
peer_state.latest_features = Readable::read(reader)?;
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
let highest_seen_timestamp: u32 = Readable::read(reader)?;
let pending_inbound_payment_count: u64 = Readable::read(reader)?;
- let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+ let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
for _ in 0..pending_inbound_payment_count {
if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
return Err(DecodeError::InvalidValue);
let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
- HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+ hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
for _ in 0..pending_outbound_payments_count_compat {
let session_priv = Readable::read(reader)?;
let payment = PendingOutboundPayment::Legacy {
- session_privs: [session_priv].iter().cloned().collect()
+ session_privs: hash_set_from_iter([session_priv]),
};
if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
return Err(DecodeError::InvalidValue)
// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
let mut pending_outbound_payments = None;
- let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
+ let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
let mut received_network_pubkey: Option<PublicKey> = None;
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut claimable_htlc_onion_fields = None;
- let mut pending_claiming_payments = Some(HashMap::new());
+ let mut pending_claiming_payments = Some(new_hash_map());
let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
let mut events_override = None;
let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
- let mut outbounds = HashMap::new();
+ let mut outbounds = new_hash_map();
for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
// still open, we need to replay any monitor updates that are for closed channels,
// creating the necessary peer_state entries as we go.
let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
- Mutex::new(peer_state_from_chans(HashMap::new()))
+ Mutex::new(peer_state_from_chans(new_hash_map()))
});
let mut peer_state = peer_state_mutex.lock().unwrap();
handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
retry_strategy: None,
attempts: PaymentAttempts::new(),
payment_params: None,
- session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
+ session_privs: hash_set_from_iter([session_priv_bytes]),
payment_hash: htlc.payment_hash,
payment_secret: None, // only used for retries, and we'll never retry on startup
payment_metadata: None, // only used for retries, and we'll never retry on startup
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
- let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+ let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
}
- let mut outbound_scid_aliases = HashSet::new();
+ let mut outbound_scid_aliases = new_hash_set();
for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
downstream_counterparty_and_funding_outpoint:
Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
- if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
blocked_channel_id);
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
- downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
+ downstream_closed, true, downstream_node_id, downstream_funding,
+ downstream_channel_id, None
+ );
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
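+ // `CooperativeClosure` is now split by initiator: nodes[0] initiated the shutdown and so
+ // sees the locally-initiated variant, while nodes[1] sees the counterparty-initiated one.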
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
};
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
}),
custom_tlvs: Vec::new(),
};
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
let node = create_network(1, &node_cfg, &node_chanmgr);
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
sender_intended_htlc_amt_msat: 100,
cltv_expiry_height: 22,
let (scid_1, scid_2) = (42, 43);
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);