use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::{genesis_block, ChainHash};
+use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{LockTime, secp256k1, Sequence};
+use crate::blinded_path::BlindedPath;
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
use crate::ln::wire::Encode;
+use crate::offers::offer::{DerivedMetadata, OfferBuilder};
+use crate::offers::parse::Bolt12SemanticError;
+use crate::offers::refund::RefundBuilder;
use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
-use alloc::collections::BTreeMap;
+use alloc::collections::{btree_map, BTreeMap};
use crate::io;
use crate::prelude::*;
}
#[inline]
fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+ let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
+ let action = if shutdown_res.monitor_update.is_some() {
+ // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
+ // should disconnect our peer such that we force them to broadcast their latest
+ // commitment upon reconnecting.
+ msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
+ } else {
+ msgs::ErrorAction::SendErrorMessage { msg: err_msg }
+ };
Self {
- err: LightningError {
- err: err.clone(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage {
- channel_id,
- data: err
- },
- },
- },
+ err: LightningError { err, action },
chan_id: Some((channel_id, user_channel_id)),
shutdown_finish: Some((shutdown_res, channel_update)),
channel_capacity: Some(channel_capacity)
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+ Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
&'e DefaultRouter<
&'f NetworkGraph<&'g L>,
&'g L,
- &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
+ &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
>,
>;
/// A trivial trait which describes any [`ChannelManager`].
+///
+/// This is not exported to bindings users as general cover traits aren't useful in other
+/// languages.
pub trait AChannelManager {
/// A type implementing [`chain::Watch`].
type Watch: chain::Watch<Self::Signer> + ?Sized;
L::Target: Logger,
{
default_configuration: UserConfig,
- genesis_hash: BlockHash,
+ chain_hash: ChainHash,
fee_estimator: LowerBoundedFeeEstimator<F>,
chain_monitor: M,
tx_broadcaster: T,
/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
/// Notifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
+ /// Tracks the progress of channels going through batch funding by whether funding_signed was
+ /// received and the monitor has been persisted.
+ ///
+ /// This information does not need to be persisted as funding nodes can forget
+ /// unfunded channels upon disconnection.
+ funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
background_events_processed_since_startup: AtomicBool,
}
/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
-///
-/// Balances of a channel are available through [`ChainMonitor::get_claimable_balances`] and
-/// [`ChannelMonitor::get_claimable_balances`], calculated with respect to the corresponding on-chain
-/// transactions.
-///
-/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
///
/// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
pub feerate_sat_per_1000_weight: Option<u32>,
+ /// Our total balance. This is the amount we would get if we close the channel.
+ /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
+ /// amount is not likely to be recoverable on close.
+ ///
+ /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
+ /// balance is not available for inclusion in new outbound HTLCs). This further does not include
+ /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
+ /// This does not consider any on-chain fees.
+ ///
+ /// See also [`ChannelDetails::outbound_capacity_msat`]
+ pub balance_msat: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// outgoing HTLCs which are awaiting some other resolution to be sent.
///
+ /// See also [`ChannelDetails::balance_msat`]
+ ///
/// This value is not exact. Due to various in-flight changes, feerate changes, and our
/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
/// should be able to spend nearly this amount.
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`] and
- /// [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+ /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
channel_value_satoshis: context.get_value_satoshis(),
feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat: balance.balance_msat,
inbound_capacity_msat: balance.inbound_capacity_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
- $self.finish_force_close_channel(shutdown_res);
+ $self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let mut updates = $chan.monitor_updating_restored(&$self.logger,
- &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+ &$self.node_signer, $self.chain_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height());
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
}
let channel_id = $chan.context.channel_id();
+ let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
+ // If the channel belongs to a batch funding transaction, the progress of the batch
+ // should be updated as we have received funding_signed and persisted the monitor.
+ if let Some(txid) = unbroadcasted_batch_funding_txid {
+ let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
+ let mut batch_completed = false;
+ if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
+ let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
+ *chan_id == channel_id &&
+ *pubkey == counterparty_node_id
+ ));
+ if let Some(channel_state) = channel_state {
+ channel_state.2 = true;
+ } else {
+ debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
+ }
+ batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
+ } else {
+ debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
+ }
+
+ // Once all channels in a batched funding transaction are ready, it is no longer
+ // necessary to track the progress of the batch, and the state of the channels can be updated.
+ if batch_completed {
+ let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
+ let per_peer_state = $self.per_peer_state.read().unwrap();
+ let mut batch_funding_tx = None;
+ for (channel_id, counterparty_node_id, _) in removed_batch_state {
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+ batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
+ chan.set_batch_ready();
+ let mut pending_events = $self.pending_events.lock().unwrap();
+ emit_channel_pending_event!(pending_events, chan);
+ }
+ }
+ }
+ if let Some(tx) = batch_funding_tx {
+ log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.txid());
+ $self.tx_broadcaster.broadcast_transactions(&[&tx]);
+ }
+ }
+ }
+
$self.handle_monitor_update_completion_actions(update_actions);
if let Some(forwards) = htlc_forwards {
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
ChannelManager {
default_configuration: config.clone(),
- genesis_hash: genesis_block(params.network).header.block_hash(),
+ chain_hash: ChainHash::using_genesis_block(params.network),
fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
chain_monitor,
tx_broadcaster,
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
-
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
+ funding_batch_states: Mutex::new(BTreeMap::new()),
entropy_source,
node_signer,
if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
outbound_scid_alias += 1;
} else {
- outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+ outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
}
if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
break;
},
}
};
- let res = channel.get_open_channel(self.genesis_hash.clone());
+ let res = channel.get_open_channel(self.chain_hash);
let temporary_channel_id = channel.context.channel_id();
match peer_state.channel_by_id.entry(temporary_channel_id) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
- let mut shutdown_result = None;
+ let shutdown_result;
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let funding_txo_opt = chan.context.get_funding_txo();
let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs) =
+ let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
+ shutdown_result = local_shutdown_result;
+ debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
// We can send the `shutdown` message before updating the `ChannelMonitor`
// here as we don't need the monitor update to complete until we send a
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
- shutdown_result = Some((None, Vec::new()));
}
}
break;
// it does not exist for this peer. Either way, we can attempt to force-close it.
//
// An appropriate error will be returned for non-existence of the channel if that's the case.
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
},
}
}
if let Some(shutdown_result) = shutdown_result {
- self.finish_force_close_channel(shutdown_result);
+ self.finish_close_channel(shutdown_result);
}
Ok(())
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
- fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
+ fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
- let (monitor_update_option, mut failed_htlcs) = shutdown_res;
- log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
- for htlc_source in failed_htlcs.drain(..) {
+ log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+ for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+ if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
// ignore the result here.
let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
}
+ let mut shutdown_results = Vec::new();
+ if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
+ let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
+ let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let mut has_uncompleted_channel = None;
+ for (channel_id, counterparty_node_id, state) in affected_channels {
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
+ update_maps_on_chan_removal!(self, &chan.context());
+ self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
+ shutdown_results.push(chan.context_mut().force_shutdown(false));
+ }
+ }
+ has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
+ }
+ debug_assert!(
+ has_uncompleted_channel.unwrap_or(true),
+ "Closing a batch where all channels have completed initial monitor update",
+ );
+ }
+ for shutdown_result in shutdown_results.drain(..) {
+ self.finish_close_channel(shutdown_result);
+ }
}
/// `peer_msg` should be set when we receive a message from a peer, but not set when the
mem::drop(per_peer_state);
match chan_phase {
ChannelPhase::Funded(mut chan) => {
- self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+ self.finish_close_channel(chan.context.force_shutdown(broadcast));
(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
},
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
- self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false));
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
},
}
);
// payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
// channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
let current_height: u32 = self.best_block.read().unwrap().height();
- if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
let mut err_data = Vec::with_capacity(12);
err_data.extend_from_slice(&amt_msat.to_be_bytes());
err_data.extend_from_slice(¤t_height.to_be_bytes());
// Note that this is likely a timing oracle for detecting whether an scid is a
// phantom or an intercept.
if (self.default_configuration.accept_intercept_htlcs &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
- fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
+ fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
{
None
} else {
};
let unsigned = msgs::UnsignedChannelUpdate {
- chain_hash: self.genesis_hash,
+ chain_hash: self.chain_hash,
short_channel_id,
timestamp: chan.context.get_update_time_counter(),
flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
/// In general, a path may raise:
/// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
/// node public key) is specified.
- /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
- /// (including due to previous monitor update failure or new permanent monitor update
- /// failure).
+ /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available as it has been
+ /// closed, doesn't exist, or the peer is currently disconnected.
/// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
/// determine the ultimate status of a payment.
///
- /// # Requested Invoices
- ///
- /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the
- /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying
- /// it once received. The other events may only be generated once the invoice has been received.
- ///
/// # Restart Behavior
///
/// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
- /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated; likewise for
- /// [`Event::InvoiceRequestFailed`].
- ///
- /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+ /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated.
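+ ///
+ /// # Example
+ ///
+ /// A short sketch, assuming `payment_id` is the [`PaymentId`] used when the payment was
+ /// originally sent:
+ ///
+ /// ```ignore
+ /// channel_manager.abandon_payment(payment_id);
+ /// // No further retries will be attempted; watch for Event::PaymentFailed in your event
+ /// // handler to learn when the payment has been fully abandoned.
+ /// ```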
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
- &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+ fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
+ &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
+ mut find_funding_output: FundingOutput,
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
#[cfg(test)]
pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
- self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
+ self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_, tx| {
Ok(OutPoint { txid: tx.txid(), index: output_index })
})
}
/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
+ self.batch_funding_transaction_generated(&[(temporary_channel_id, counterparty_node_id)], funding_transaction)
+ }
+
+ /// Call this upon creation of a batch funding transaction for the given channels.
+ ///
+ /// Return values are identical to [`Self::funding_transaction_generated`], with respect to
+ /// each individual channel and transaction output.
+ ///
+ /// Do NOT broadcast the funding transaction yourself. This batch funding transaction
+ /// will only be broadcast when we have safely received and persisted the counterparty's
+ /// signature for each channel.
+ ///
+ /// If there is an error, all channels in the batch are to be considered closed.
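+ ///
+ /// # Example
+ ///
+ /// A rough sketch, assuming `channel_manager` is an initialized [`ChannelManager`] and
+ /// `build_batch_funding_tx` is a stand-in for wallet logic that creates one fully-signed
+ /// transaction paying the `output_script` of every collected
+ /// `Event::FundingGenerationReady`:
+ ///
+ /// ```ignore
+ /// // One entry per Event::FundingGenerationReady received for the batch.
+ /// let channels: Vec<(ChannelId, PublicKey)> = pending_batch_channels;
+ /// let funding_tx = build_batch_funding_tx(&funding_generation_events);
+ ///
+ /// let temporary_channels: Vec<(&ChannelId, &PublicKey)> =
+ ///     channels.iter().map(|(id, node)| (id, node)).collect();
+ /// // Do NOT broadcast funding_tx; it is broadcast automatically once all counterparty
+ /// // signatures have been received and the monitors persisted.
+ /// channel_manager.batch_funding_transaction_generated(&temporary_channels, funding_tx)?;
+ /// ```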
+ pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let mut result = Ok(());
if !funding_transaction.is_coin_base() {
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
- return Err(APIError::APIMisuseError {
+ result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
- });
+ }));
}
}
}
+ if funding_transaction.output.len() > u16::max_value() as usize {
+ result = result.and(Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ }));
+ }
{
let height = self.best_block.read().unwrap().height();
// Transactions are evaluated as final by network mempools if their locktime is strictly
// node might not have perfect sync about their blockchain views. Thus, if the wallet
// module is ahead of LDK, only allow one more block of headroom.
if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 {
- return Err(APIError::APIMisuseError {
+ result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
- });
+ }));
}
}
- self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
- if tx.output.len() > u16::max_value() as usize {
- return Err(APIError::APIMisuseError {
- err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
- });
- }
- let mut output_index = None;
- let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
- for (idx, outp) in tx.output.iter().enumerate() {
- if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
- if output_index.is_some() {
+ let txid = funding_transaction.txid();
+ let is_batch_funding = temporary_channels.len() > 1;
+ let mut funding_batch_states = if is_batch_funding {
+ Some(self.funding_batch_states.lock().unwrap())
+ } else {
+ None
+ };
+ let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
+ match states.entry(txid) {
+ btree_map::Entry::Occupied(_) => {
+ result = result.clone().and(Err(APIError::APIMisuseError {
+ err: "Batch funding transaction with the same txid already exists".to_owned()
+ }));
+ None
+ },
+ btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
+ }
+ });
+ for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
+ result = result.and_then(|_| self.funding_transaction_generated_intern(
+ temporary_channel_id,
+ counterparty_node_id,
+ funding_transaction.clone(),
+ is_batch_funding,
+ |chan, tx| {
+ let mut output_index = None;
+ let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
+ if output_index.is_some() {
+ return Err(APIError::APIMisuseError {
+ err: "Multiple outputs matched the expected script and value".to_owned()
+ });
+ }
+ output_index = Some(idx as u16);
+ }
+ }
+ if output_index.is_none() {
return Err(APIError::APIMisuseError {
- err: "Multiple outputs matched the expected script and value".to_owned()
+ err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
});
}
- output_index = Some(idx as u16);
+ let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
+ if let Some(funding_batch_state) = funding_batch_state.as_mut() {
+ funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+ }
+ Ok(outpoint)
+ })
+ );
+ }
+ if let Err(ref e) = result {
+ // Remaining channels need to be removed on any error.
+ let e = format!("Error in transaction funding: {:?}", e);
+ let mut channels_to_remove = Vec::new();
+ channels_to_remove.extend(funding_batch_states.as_mut()
+ .and_then(|states| states.remove(&txid))
+ .into_iter().flatten()
+ .map(|(chan_id, node_id, _state)| (chan_id, node_id))
+ );
+ channels_to_remove.extend(temporary_channels.iter()
+ .map(|(&chan_id, &node_id)| (chan_id, node_id))
+ );
+ let mut shutdown_results = Vec::new();
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for (channel_id, counterparty_node_id) in channels_to_remove {
+ per_peer_state.get(&counterparty_node_id)
+ .map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
+ .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
+ .map(|mut chan| {
+ update_maps_on_chan_removal!(self, &chan.context());
+ self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
+ shutdown_results.push(chan.context_mut().force_shutdown(false));
+ });
}
}
- if output_index.is_none() {
- return Err(APIError::APIMisuseError {
- err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
- });
+ for shutdown_result in shutdown_results.drain(..) {
+ self.finish_close_channel(shutdown_result);
}
- Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
- })
+ }
+ result
}
/// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
for channel_id in channel_ids {
if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
- err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id),
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
});
};
}
next_hop_channel_id, next_node_id)
}),
None => return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}",
next_hop_channel_id, next_node_id)
})
}
}
if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
- if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
+ if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
let next_hop = match onion_utils::decode_next_payment_hop(
phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
}
}
}
- let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
- Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
+ let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
+ let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
+ Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
forwarding_channel_not_found!();
continue;
if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+ if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+ }
return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
/// with the current [`ChannelConfig`].
/// * Removing peers which have disconnected and no longer have any channels.
/// * Force-closing and removing channels which have not completed establishment in a timely manner.
+ /// * Forgetting about stale outbound payments, either those that have already been fulfilled
+ /// or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
+ /// The latter is determined using the system clock in `std` and the highest seen block time
+ /// minus two hours in `no-std`.
///
/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
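+ ///
+ /// # Example
+ ///
+ /// A sketch of driving the timer manually, assuming `channel_manager` is held in an `Arc`;
+ /// note that `lightning-background-processor` normally issues these ticks for you:
+ ///
+ /// ```ignore
+ /// let cm = Arc::clone(&channel_manager);
+ /// std::thread::spawn(move || loop {
+ ///     std::thread::sleep(Duration::from_secs(60));
+ ///     cm.timer_tick_occurred();
+ /// });
+ /// ```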
}
for shutdown_res in shutdown_channels {
- self.finish_force_close_channel(shutdown_res);
+ self.finish_close_channel(shutdown_res);
}
- self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
+ #[cfg(feature = "std")]
+ let duration_since_epoch = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+ #[cfg(not(feature = "std"))]
+ let duration_since_epoch = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
+ );
+
+ self.pending_outbound_payments.remove_stale_payments(
+ duration_since_epoch, &self.pending_events
+ );
// Technically we don't need to do this here, but if we have holding cell entries in a
// channel that need freeing, it's better to do that here and block a background task
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
// likely to be lost on restart!
- if msg.chain_hash != self.genesis_hash {
+ if msg.chain_hash != self.chain_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
- self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+ self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
if let Some(shutdown_res) = finish_shutdown {
- self.finish_force_close_channel(shutdown_res);
+ self.finish_close_channel(shutdown_res);
}
Ok(())
}
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
- let mut shutdown_result = None;
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
- let (tx, chan_option) = {
+ let (tx, chan_option, shutdown_result) = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
if let Some(msg) = closing_signed {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
// also implies there are no pending HTLCs left on the channel, so we can
// fully delete it from tracking (the channel monitor is still around to
// watch for old state broadcasts)!
- (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
- } else { (tx, None) }
+ (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+ } else { (tx, None, shutdown_result) }
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
- shutdown_result = Some((None, Vec::new()));
}
- mem::drop(peer_state_mutex);
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
- self.finish_force_close_channel(shutdown_result);
+ self.finish_close_channel(shutdown_result);
}
Ok(())
}
},
hash_map::Entry::Vacant(entry) => {
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
{
let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_phase_entry!(self, chan.announcement_signatures(
- &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+ &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
msg, &self.default_configuration
), chan_phase_entry),
// Note that announcement_signatures fails if the channel cannot be announced,
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
- log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
- try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
+ log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
+ let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
+ // If nothing changed after applying their update, we don't need to bother
+ // persisting.
+ if !did_change {
+ return Ok(NotifyOption::SkipPersistNoEvents);
+ }
}
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+ msg.channel_id
+ )
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
// freed HTLCs to fail backwards. If in the future we no longer drop pending
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
- msg, &self.logger, &self.node_signer, self.genesis_hash,
+ msg, &self.logger, &self.node_signer, self.chain_hash,
&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
}
},
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ hash_map::Entry::Vacant(_) => {
+ log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+ log_bytes!(msg.channel_id.0));
+ // Unfortunately, lnd doesn't force close on errors
+ // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
+ // One of the few ways to get an lnd counterparty to force close is by
+ // replicating what they do when restoring static channel backups (SCBs). They
+ // send an invalid `ChannelReestablish` with `0` commitment numbers and an
+ // invalid `your_last_per_commitment_secret`.
+ //
+ // Since we received a `ChannelReestablish` for a channel that doesn't exist, we
+ // can assume it's likely the channel closed from our point of view, but it
+ // remains open on the counterparty's side. By sending this bogus
+ // `ChannelReestablish` message now as a response to theirs, we trigger them to
+ // force close broadcasting their latest state. If the closing transaction from
+ // our point of view remains unconfirmed, it'll enter a race with the
+ // counterparty's to-be-broadcast latest commitment transaction.
+ peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
+ node_id: *counterparty_node_id,
+ msg: msgs::ChannelReestablish {
+ channel_id: msg.channel_id,
+ next_local_commitment_number: 0,
+ next_remote_commitment_number: 0,
+ your_last_per_commitment_secret: [1u8; 32],
+ my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
+ next_funding_txid: None,
+ },
+ });
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
+ counterparty_node_id), msg.channel_id)
+ )
+ }
}
};
self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
},
});
}
}
for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ self.finish_close_channel(failure);
}
has_pending_monitor_events
fn maybe_generate_initial_closing_signed(&self) -> bool {
let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
let mut has_update = false;
- let mut shutdown_result = None;
+ let mut shutdown_results = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
match phase {
ChannelPhase::Funded(chan) => {
match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
- Ok((msg_opt, tx_opt)) => {
+ Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: chan.context.get_counterparty_node_id(), msg,
});
}
+ debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+ if let Some(shutdown_result) = shutdown_result_opt {
+ shutdown_results.push(shutdown_result);
+ }
if let Some(tx) = tx_opt {
// We're done with this channel. We got a closing_signed and sent back
// a closing_signed with a closing transaction to broadcast.
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
- shutdown_result = Some((None, Vec::new()));
false
} else { true }
},
let _ = handle_error!(self, err, counterparty_node_id);
}
- if let Some(shutdown_result) = shutdown_result {
- self.finish_force_close_channel(shutdown_result);
+ for shutdown_result in shutdown_results.drain(..) {
+ self.finish_close_channel(shutdown_result);
}
has_update
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+ if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
counterparty_node_id, funding_txo, update
});
}
- self.finish_force_close_channel(failure);
+ self.finish_close_channel(failure);
}
}
+ /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
+ /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
+ /// not have an expiration unless otherwise set on the builder.
+ ///
+ /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
+ /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
+ /// the node must be announced. Otherwise, there is no way to find a path to the introduction
+ /// node in order to send the [`InvoiceRequest`].
+ ///
+ /// [`Offer`]: crate::offers::offer::Offer
+ /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
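+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (not compiled here; `channel_manager` is assumed to be an announced
+ /// node's [`ChannelManager`]):
+ ///
+ /// ```ignore
+ /// let offer = channel_manager
+ ///     .create_offer_builder("coffee".to_string())
+ ///     .amount_msats(10_000)
+ ///     .build()?;
+ /// // The offer is encoded as a bech32 string for display, e.g. in a QR code.
+ /// println!("{}", offer);
+ /// ```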
+ pub fn create_offer_builder(
+ &self, description: String
+ ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+ let node_id = self.get_our_node_id();
+ let expanded_key = &self.inbound_payment_key;
+ let entropy = &*self.entropy_source;
+ let secp_ctx = &self.secp_ctx;
+ let path = self.create_one_hop_blinded_path();
+
+ OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+ .chain_hash(self.chain_hash)
+ .path(path)
+ }
+
+ /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
+ /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund. The builder will
+ /// have the provided expiration set. Any changes to the expiration on the returned builder will
+ /// not be honored by [`ChannelManager`].
+ ///
+ /// The provided `payment_id` is used to ensure that only one invoice is paid for the refund.
+ ///
+ /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
+ /// the introduction node and a derived payer id for sender privacy. As such, currently, the
+ /// node must be announced. Otherwise, there is no way to find a path to the introduction node
+ /// in order to send the [`Bolt12Invoice`].
+ ///
+ /// [`Refund`]: crate::offers::refund::Refund
+ /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
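+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming `channel_manager` is an announced node's [`ChannelManager`]
+ /// and `absolute_expiry` is a `Duration` since the Unix epoch:
+ ///
+ /// ```ignore
+ /// let payment_id = PaymentId([42; 32]); // must be unique per payment
+ /// let refund = channel_manager
+ ///     .create_refund_builder(
+ ///         "refund for order 123".to_string(), 10_000, absolute_expiry, payment_id,
+ ///         Retry::Timeout(Duration::from_secs(60)), None,
+ ///     )?
+ ///     .build()?;
+ /// ```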
+ pub fn create_refund_builder(
+ &self, description: String, amount_msats: u64, absolute_expiry: Duration,
+ payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
+ ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
+ let node_id = self.get_our_node_id();
+ let expanded_key = &self.inbound_payment_key;
+ let entropy = &*self.entropy_source;
+ let secp_ctx = &self.secp_ctx;
+ let path = self.create_one_hop_blinded_path();
+
+ let builder = RefundBuilder::deriving_payer_id(
+ description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
+ )?
+ .chain_hash(self.chain_hash)
+ .absolute_expiry(absolute_expiry)
+ .path(path);
+
+ self.pending_outbound_payments
+ .add_new_awaiting_invoice(
+ payment_id, absolute_expiry, retry_strategy, max_total_routing_fee_msat,
+ )
+ .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
+
+ Ok(builder)
+ }
+
/// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
/// to pay us.
///
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
+ /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
+ /// node.
+ fn create_one_hop_blinded_path(&self) -> BlindedPath {
+ let entropy_source = self.entropy_source.deref();
+ let secp_ctx = &self.secp_ctx;
+ BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+ }
+
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
/// are used when constructing the phantom invoice's route hints.
///
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+ let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
// Ensure the generated scid doesn't conflict with a real channel.
match short_to_chan_info.get(&scid_candidate) {
Some(_) => continue,
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+ let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
// Ensure the generated scid doesn't conflict with a real channel.
if short_to_chan_info.contains_key(&scid_candidate) { continue }
return scid_candidate
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
}
}
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
- self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let last_best_block_height = self.best_block.read().unwrap().height();
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
- self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
}
}
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
macro_rules! max_time {
($timestamp: expr) => {
msg: announcement_sigs,
});
if let Some(height) = height_opt {
- if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+ if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.chain_hash, height, &self.default_configuration) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: announcement,
// Note that announcement_signatures fails if the channel cannot be announced,
self.issue_channel_close_events(&channel.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
- channel_id: channel.context.channel_id(),
- data: reason_message,
- } },
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage {
+ channel_id: channel.context.channel_id(),
+ data: reason_message,
+ })
+ },
});
return false;
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
self, || NotifyOption::SkipPersistHandleEvents);
-
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
peer_state.channel_by_id.retain(|_, phase| {
let context = match phase {
ChannelPhase::Funded(chan) => {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- // We only retain funded channels that are not shutdown.
- if !chan.is_shutdown() {
+ if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
+ // We only retain funded channels that are not shutdown.
return true;
}
- &chan.context
+ &mut chan.context
},
// Unfunded channels will always be removed.
ChannelPhase::UnfundedOutboundV1(chan) => {
- &chan.context
+ &mut chan.context
},
ChannelPhase::UnfundedInboundV1(chan) => {
- &chan.context
+ &mut chan.context
},
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
- failed_channels.push((None, Vec::new()));
+ failed_channels.push(context.force_shutdown(false));
false
});
// Note that we don't bother generating any events for pre-accept channels -
mem::drop(per_peer_state);
for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ self.finish_close_channel(failure);
}
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
msg,
provided_init_features(&self.default_configuration)
}
- fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
- Some(vec![ChainHash::from(&self.genesis_hash[..])])
+ fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
+ Some(vec![self.chain_hash])
}
fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
(10, self.channel_value_satoshis, required),
(12, self.unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, self.next_outbound_htlc_limit_msat, required), // Forwards compatibility for removed balance_msat field.
+ (16, self.balance_msat, required),
(18, self.outbound_capacity_msat, required),
(19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
(10, channel_value_satoshis, required),
(12, unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, _balance_msat, option), // Backwards compatibility for removed balance_msat field.
+ (16, balance_msat, required),
(18, outbound_capacity_msat, required),
// Note that by the time we get past the required read above, outbound_capacity_msat will be
// filled in, so we can safely unwrap it here.
let user_channel_id = user_channel_id_low as u128 +
((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
- let _balance_msat: Option<u64> = _balance_msat;
-
Ok(Self {
inbound_scid_alias,
channel_id: channel_id.0.unwrap(),
channel_value_satoshis: channel_value_satoshis.0.unwrap(),
unspendable_punishment_reserve,
user_channel_id,
+ balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
- self.genesis_hash.write(writer)?;
+ self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
best_block.height().write(writer)?;
}
number_of_funded_channels += peer_state.channel_by_id.iter().filter(
- |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false }
+ |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
).count();
}
let peer_state = &mut *peer_state_lock;
for channel in peer_state.channel_by_id.iter().filter_map(
|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
- if channel.context.is_funding_initiated() { Some(channel) } else { None }
+ if channel.context.is_funding_broadcast() { Some(channel) } else { None }
} else { None }
) {
channel.write(writer)?;
fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
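+ // `ChainHash` has the same 32-byte encoding as the `BlockHash` previously written
+ // here, so swapping the type keeps serialization compatibility.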
- let genesis_hash: BlockHash = Readable::read(reader)?;
+ let chain_hash: ChainHash = Readable::read(reader)?;
let best_block_height: u32 = Readable::read(reader)?;
let best_block_hash: BlockHash = Readable::read(reader)?;
log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
- if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+ let mut shutdown_result = channel.context.force_shutdown(true);
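+ // Batch funding state is not serialized, so a persisted channel still waiting on an
+ // unbroadcasted batch funding transaction indicates an invalid serialization.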
+ if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
+ if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, update
});
}
- failed_htlcs.append(&mut new_failed_htlcs);
+ failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.context.is_funding_initiated() {
+ if channel.context.is_funding_broadcast() {
id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
}
match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
- .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
+ .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
}
chan.context.set_outbound_scid_alias(outbound_scid_alias);
}
let channel_manager = ChannelManager {
- genesis_hash,
+ chain_hash,
fee_estimator: bounded_fee_estimator,
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
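+ // In-flight batch funding state is not persisted (see the read path above), so we
+ // always start with an empty map.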
+ funding_batch_states: Mutex::new(BTreeMap::new()),
+
entropy_source: args.entropy_source,
node_signer: args.node_signer,
signer_provider: args.signer_provider,
check_api_error_message(expected_message, res_err)
}
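+ // The helpers below assert on the exact `APIError` message; `check_api_error_message`
+ // accepts both the `APIMisuseError` and `ChannelUnavailable` variants.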
+ fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
+ let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
+ check_api_error_message(expected_message, res_err)
+ }
+
+ fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
+ let expected_message = "No such channel awaiting to be accepted.".to_string();
+ check_api_error_message(expected_message, res_err)
+ }
+
fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
match res_err {
Err(APIError::APIMisuseError { err }) => {
check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
}
+ #[test]
+ fn test_api_calls_with_unavailable_channel() {
+ // Tests that our API functions which expect a `counterparty_node_id` and a `channel_id`
+ // as input behave as expected if the `counterparty_node_id` is a known peer in the
+ // `ChannelManager::per_peer_state` map but the peer's state doesn't contain a channel
+ // with the given `channel_id`.
+ let chanmon_cfg = create_chanmon_cfgs(2);
+ let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+ let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+ let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+ let counterparty_node_id = nodes[1].node.get_our_node_id();
+
+ // Dummy value
+ let channel_id = ChannelId::from_bytes([4; 32]);
+
+ // Test the API functions.
+ check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
+
+ check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
+ }
+
#[test]
fn test_connection_limiting() {
// Test that we limit un-channel'd peers and un-funded channels properly.
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
}
+ #[test]
+ fn test_final_incorrect_cltv() {
+ let chanmon_cfg = create_chanmon_cfgs(1);
+ let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+ let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
+ let node = create_network(1, &node_cfg, &node_chanmgr);
+
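+ // Construct a receive onion whose `outgoing_cltv_value` (22) is below the HTLC's
+ // actual `cltv_expiry` (23, passed below), which BOLT 4 permits.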
+ let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
+ amt_msat: 100,
+ outgoing_cltv_value: 22,
+ payment_metadata: None,
+ keysend_preimage: None,
+ payment_data: Some(msgs::FinalOnionHopData {
+ payment_secret: PaymentSecret([0; 32]), total_msat: 100,
+ }),
+ custom_tlvs: Vec::new(),
+ }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None);
+
+ // Should not return an error, as the failure condition from BOLT 4:
+ // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
+ // is not satisfied here.
+ assert!(result.is_ok());
+ }
+
#[test]
fn test_inbound_anchors_manual_acceptance() {
// Tests that we properly limit inbound channels when we have the manual-channel-acceptance
let payment_preimage = PaymentPreimage([42; 32]);
assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
}
+
+ #[test]
+ fn test_trigger_lnd_force_close() {
+ let chanmon_cfg = create_chanmon_cfgs(2);
+ let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+ let user_config = test_default_channel_config();
+ let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+ let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+ // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_added_monitors(&nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ {
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ }
+
+ // Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
+ // such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
+ // their side.
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ let channel_reestablish = get_event_msg!(
+ nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
+ );
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &channel_reestablish);
+
+ // Alice should respond with an error since the channel isn't known, but a bogus
+ // `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
+ // close even if it was an lnd node.
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 2);
+ if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
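+ // Commitment numbers of 0 are out-of-spec and signal a bogus reestablish; peers
+ // (lnd in particular) respond by broadcasting their latest commitment state.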
+ assert_eq!(msg.next_local_commitment_number, 0);
+ assert_eq!(msg.next_remote_commitment_number, 0);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &msg);
+ } else { panic!() };
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_added_monitors(&nodes[1], 1);
+ let expected_close_reason = ClosureReason::ProcessingError {
+ err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
+ };
+ check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
+ {
+ let txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ }
+ }
}
#[cfg(ldk_bench)]